ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.22
Committed: 2006-02-27T00:04:12Z (18 years, 2 months ago) by gbeauche
Branch: MAIN
Changes since 1.21: +0 -15 lines
Log Message:
Memory allocated through win32 VirtualAlloc() is zero-filled. Likewise for
memory mmap()'ed with MAP_ANONYMOUS (equivalent to /dev/zero mapping but
faster)

File Contents

# Content
1 /*
2 * vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
3 * (supports mmap, vm_allocate or fallbacks to malloc)
4 *
5 * Basilisk II (C) 1997-2005 Christian Bauer
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 #ifdef HAVE_CONFIG_H
23 #include "config.h"
24 #endif
25
26 #ifdef HAVE_FCNTL_H
27 #include <fcntl.h>
28 #endif
29
30 #ifdef HAVE_WIN32_VM
31 #define WIN32_LEAN_AND_MEAN /* avoid including junk */
32 #include <windows.h>
33 #endif
34
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <limits.h>
39 #include "vm_alloc.h"
40
41 #ifdef HAVE_MACH_VM
42 #ifndef HAVE_MACH_TASK_SELF
43 #ifdef HAVE_TASK_SELF
44 #define mach_task_self task_self
45 #else
46 #error "No task_self(), you lose."
47 #endif
48 #endif
49 #endif
50
51 /* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
52 because the emulated target is 32-bit and this helps to allocate
53 memory so that branches could be resolved more easily (32-bit
54 displacement to code in .text), on AMD64 for example. */
55 #ifndef MAP_32BIT
56 #define MAP_32BIT 0
57 #endif
58 #ifndef MAP_ANON
59 #define MAP_ANON 0
60 #endif
61 #ifndef MAP_ANONYMOUS
62 #define MAP_ANONYMOUS 0
63 #endif
64
65 #define MAP_EXTRA_FLAGS (MAP_32BIT)
66
67 #ifdef HAVE_MMAP_VM
68 #if defined(__linux__) && defined(__i386__)
69 /* Force a reasonable address below 0x80000000 on x86 so that we
70 don't get addresses above when the program is run on AMD64.
71 NOTE: this is empirically determined on Linux/x86. */
72 #define MAP_BASE 0x10000000
73 #else
74 #define MAP_BASE 0x00000000
75 #endif
76 static char * next_address = (char *)MAP_BASE;
77 #ifdef HAVE_MMAP_ANON
78 #define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
79 #define zero_fd -1
80 #else
81 #ifdef HAVE_MMAP_ANONYMOUS
82 #define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
83 #define zero_fd -1
84 #else
85 #define map_flags (MAP_EXTRA_FLAGS)
86 static int zero_fd = -1;
87 #endif
88 #endif
89 #endif
90
91 /* Utility functions for POSIX SHM handling. */
92
93 #ifdef USE_33BIT_ADDRESSING
/* One SHM-backed region: records the POSIX SHM object name backing the
   mapping at [base, base + size) so that vm_release() can later unmap the
   33-bit mirror and unlink the object. */
struct shm_range_t {
	const char *file;	// SHM object name (strdup()'ed by the caller, freed in vm_release())
	void *base;		// start address of the primary mapping
	unsigned int size;	// length of the mapping in bytes
	shm_range_t *next;	// next node of the singly-linked list
};

static shm_range_t *shm_ranges = NULL;	// list head; NULL when no ranges are registered
102
103 static bool add_shm_range(const char *file, void *base, unsigned int size)
104 {
105 shm_range_t *r = (shm_range_t *)malloc(sizeof(shm_range_t));
106 if (r) {
107 r->file = file;
108 r->base = base;
109 r->size = size;
110 r->next = shm_ranges ? shm_ranges : NULL;
111 shm_ranges = r;
112 return true;
113 }
114 return false;
115 }
116
117 static shm_range_t *find_shm_range(void *base, unsigned int size)
118 {
119 for (shm_range_t *r = shm_ranges; r != NULL; r = r->next)
120 if (r->base == base && r->size == size)
121 return r;
122 return NULL;
123 }
124
125 static bool remove_shm_range(shm_range_t *r)
126 {
127 if (r) {
128 for (shm_range_t *p = shm_ranges; p != NULL; p = p->next) {
129 if (p->next == r) {
130 p->next = r->next;
131 free(r);
132 return true;
133 }
134 }
135 }
136 return false;
137 }
138
139 static bool remove_shm_range(void *base, unsigned int size)
140 {
141 remove_shm_range(find_shm_range(base, size));
142 }
143 #endif
144
145 /* Build a POSIX SHM memory segment file descriptor name. */
146
147 #ifdef USE_33BIT_ADDRESSING
/* Build a unique POSIX SHM object name for this process.
   Returns a pointer to a static buffer (overwritten on the next call),
   or NULL when the name would not fit. */
static const char *build_shm_filename(void)
{
	// Per-process counter so successive segments get distinct names
	static int id = 0;
	static char filename[PATH_MAX];

	// Names look like "/BasiliskII-<pid>-shm-<counter>"
	int len = snprintf(filename, sizeof(filename), "/BasiliskII-%d-shm-%d", getpid(), id);
	if (len == -1 || len >= sizeof(filename))
		return NULL;

	// Only consume the counter once a name was successfully built
	id++;
	return filename;
}
160 #endif
161
162 /* Translate generic VM map flags to host values. */
163
164 #ifdef HAVE_MMAP_VM
165 static int translate_map_flags(int vm_flags)
166 {
167 int flags = 0;
168 if (vm_flags & VM_MAP_SHARED)
169 flags |= MAP_SHARED;
170 if (vm_flags & VM_MAP_PRIVATE)
171 flags |= MAP_PRIVATE;
172 if (vm_flags & VM_MAP_FIXED)
173 flags |= MAP_FIXED;
174 if (vm_flags & VM_MAP_32BIT)
175 flags |= MAP_32BIT;
176 return flags;
177 }
178 #endif
179
180 /* Align ADDR and SIZE to 64K boundaries. */
181
182 #ifdef HAVE_WIN32_VM
183 static inline LPVOID align_addr_segment(LPVOID addr)
184 {
185 return (LPVOID)(((DWORD)addr) & -65536);
186 }
187
188 static inline DWORD align_size_segment(LPVOID addr, DWORD size)
189 {
190 return size + ((DWORD)addr - (DWORD)align_addr_segment(addr));
191 }
192 #endif
193
194 /* Translate generic VM prot flags to host values. */
195
196 #ifdef HAVE_WIN32_VM
197 static int translate_prot_flags(int prot_flags)
198 {
199 int prot = PAGE_READWRITE;
200 if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
201 prot = PAGE_EXECUTE_READWRITE;
202 else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
203 prot = PAGE_EXECUTE_READ;
204 else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
205 prot = PAGE_READWRITE;
206 else if (prot_flags == VM_PAGE_READ)
207 prot = PAGE_READONLY;
208 else if (prot_flags == 0)
209 prot = PAGE_NOACCESS;
210 return prot;
211 }
212 #endif
213
/* Initialize the VM system. Returns 0 if successful, -1 for errors. */

int vm_init(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	// zero_fd is a real variable (not the -1 macro defined above) only when
	// neither MAP_ANON nor MAP_ANONYMOUS is available; in that case mmap()
	// must be backed by a descriptor onto /dev/zero to get zero-filled pages
	zero_fd = open("/dev/zero", O_RDWR);
	if (zero_fd < 0)
		return -1;
#endif
#endif
	return 0;
}
227
/* Deallocate all internal data used to wrap virtual memory allocators. */

void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	// Only close the /dev/zero descriptor when zero_fd is a real variable
	// (no anonymous-mmap support); reset it so a second vm_exit() is a no-op
	if (zero_fd != -1) {
		close(zero_fd);
		zero_fd = -1;
	}
#endif
#endif
}
241
242 /* Allocate zero-filled memory of SIZE bytes. The mapping is private
243 and default protection bits are read / write. The return value
244 is the actual mapping address chosen or VM_MAP_FAILED for errors. */
245
246 void * vm_acquire(size_t size, int options)
247 {
248 void * addr;
249
250 // VM_MAP_FIXED are to be used with vm_acquire_fixed() only
251 if (options & VM_MAP_FIXED)
252 return VM_MAP_FAILED;
253
254 #ifdef HAVE_MACH_VM
255 // vm_allocate() returns a zero-filled memory region
256 if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
257 return VM_MAP_FAILED;
258 #else
259 #ifdef HAVE_MMAP_VM
260 int fd = zero_fd;
261 int the_map_flags = translate_map_flags(options) | map_flags;
262
263 #ifdef USE_33BIT_ADDRESSING
264 const char *shm_file = NULL;
265 if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
266 the_map_flags &= ~(MAP_PRIVATE | MAP_ANON | MAP_ANONYMOUS);
267 the_map_flags |= MAP_SHARED;
268
269 if ((shm_file = build_shm_filename()) == NULL)
270 return VM_MAP_FAILED;
271
272 if ((fd = shm_open(shm_file, O_RDWR | O_CREAT | O_EXCL, 0644)) < 0)
273 return VM_MAP_FAILED;
274
275 if (ftruncate(fd, size) < 0)
276 return VM_MAP_FAILED;
277 }
278 #endif
279
280 if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
281 return VM_MAP_FAILED;
282
283 // Sanity checks for 64-bit platforms
284 if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
285 return VM_MAP_FAILED;
286
287 next_address = (char *)addr + size;
288
289 // Remap to 33-bit space
290 #ifdef USE_33BIT_ADDRESSING
291 if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
292 if (!add_shm_range(strdup(shm_file), addr, size))
293 return VM_MAP_FAILED;
294
295 if (mmap((char *)addr + (1L << 32), size, VM_PAGE_DEFAULT, the_map_flags | MAP_FIXED, fd, 0) == (void *)MAP_FAILED)
296 return VM_MAP_FAILED;
297 close(fd);
298 }
299 #endif
300 #else
301 #ifdef HAVE_WIN32_VM
302 if ((addr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE)) == NULL)
303 return VM_MAP_FAILED;
304 #else
305 if ((addr = calloc(size, 1)) == 0)
306 return VM_MAP_FAILED;
307
308 // Omit changes for protections because they are not supported in this mode
309 return addr;
310 #endif
311 #endif
312 #endif
313
314 // Explicitely protect the newly mapped region here because on some systems,
315 // say MacOS X, mmap() doesn't honour the requested protection flags.
316 if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
317 return VM_MAP_FAILED;
318
319 return addr;
320 }
321
322 /* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
323 Retuns 0 if successful, -1 on errors. */
324
325 int vm_acquire_fixed(void * addr, size_t size, int options)
326 {
327 // Fixed mappings are required to be private
328 if (options & VM_MAP_SHARED)
329 return -1;
330
331 #ifdef HAVE_MACH_VM
332 // vm_allocate() returns a zero-filled memory region
333 if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
334 return -1;
335 #else
336 #ifdef HAVE_MMAP_VM
337 int fd = zero_fd;
338 int the_map_flags = translate_map_flags(options) | map_flags | MAP_FIXED;
339
340 #ifdef USE_33BIT_ADDRESSING
341 const char *shm_file = NULL;
342 if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
343 the_map_flags &= ~(MAP_PRIVATE | MAP_ANON | MAP_ANONYMOUS);
344 the_map_flags |= MAP_SHARED;
345
346 if ((shm_file = build_shm_filename()) == NULL)
347 return -1;
348
349 if ((fd = shm_open(shm_file, O_RDWR | O_CREAT | O_EXCL, 0644)) < 0)
350 return -1;
351
352 if (ftruncate(fd, size) < 0)
353 return -1;
354 }
355 #endif
356
357 if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
358 return -1;
359
360 // Remap to 33-bit space
361 #ifdef USE_33BIT_ADDRESSING
362 if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
363 if (!add_shm_range(strdup(shm_file), addr, size))
364 return -1;
365
366 if (mmap((char *)addr + (1L << 32), size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
367 return -1;
368 close(fd);
369 }
370 #endif
371 #else
372 #ifdef HAVE_WIN32_VM
373 // Windows cannot allocate Low Memory
374 if (addr == NULL)
375 return -1;
376
377 // Allocate a possibly offset region to align on 64K boundaries
378 LPVOID req_addr = align_addr_segment(addr);
379 DWORD req_size = align_size_segment(addr, size);
380 LPVOID ret_addr = VirtualAlloc(req_addr, req_size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
381 if (ret_addr != req_addr)
382 return -1;
383 return -1;
384 #else
385 // Unsupported
386 return -1;
387 #endif
388 #endif
389 #endif
390
391 // Explicitely protect the newly mapped region here because on some systems,
392 // say MacOS X, mmap() doesn't honour the requested protection flags.
393 if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
394 return -1;
395
396 return 0;
397 }
398
/* Deallocate any mapping for the region starting at ADDR and extending
   LEN bytes. Returns 0 if successful, -1 on errors. */

int vm_release(void * addr, size_t size)
{
	// Safety check: don't try to release memory that was not allocated
	if (addr == VM_MAP_FAILED)
		return 0;

#ifdef HAVE_MACH_VM
	if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
		return -1;
#else
#ifdef HAVE_MMAP_VM
	if (munmap((caddr_t)addr, size) != 0)
		return -1;

#ifdef USE_33BIT_ADDRESSING
	// If this region was registered as SHM-backed (33-bit double mapping),
	// also unmap the mirror at ADDR + 4GB and destroy the backing SHM object
	shm_range_t *r = find_shm_range(addr, size);
	if (r) {
		if (munmap((char *)r->base + (1L << 32), size) != 0)
			return -1;

		if (shm_unlink(r->file) < 0)
			return -1;
		// The name was strdup()'ed when the range was registered
		free((char *)r->file);

		if (!remove_shm_range(r))
			return -1;
	}
#endif
#else
#ifdef HAVE_WIN32_VM
	// MEM_RELEASE frees the whole reservation; the size argument must be 0
	// and the address must be the (64K-aligned) base returned by VirtualAlloc
	if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
		return -1;
#else
	free(addr);
#endif
#endif
#endif

	return 0;
}
442
/* Change the memory protection of the region starting at ADDR and
   extending LEN bytes to PROT. Returns 0 if successful, -1 for errors. */

int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
	// Not a recursive call: this resolves to the Mach kernel's five-argument
	// vm_protect() via C++ overload resolution
	int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
	return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
	// NOTE(review): PROT is passed straight through, so the generic VM_PAGE_*
	// bits presumably match the mprotect() PROT_* bits — defined in vm_alloc.h
	int ret_code = mprotect((caddr_t)addr, size, prot);
	return ret_code == 0 ? 0 : -1;
#else
#ifdef HAVE_WIN32_VM
	// Win32 uses combined PAGE_* constants; translate from VM_PAGE_* bits
	DWORD old_prot;
	int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
	return ret_code != 0 ? 0 : -1;
#else
	// Unsupported
	return -1;
#endif
#endif
#endif
}
467
/* Returns the size of a page. */

int vm_get_page_size(void)
{
#ifdef HAVE_WIN32_VM
	// Cached after the first call. Note this reports the 64K allocation
	// granularity (the unit relevant for VirtualAlloc() placement here),
	// not the CPU page size.
	static unsigned long cached = 0;
	if (cached == 0) {
		SYSTEM_INFO si;
		GetSystemInfo(&si);
		cached = si.dwAllocationGranularity;
	}
	return cached;
#else
	return getpagesize();
#endif
}
484
485 #ifdef CONFIGURE_TEST_VM_MAP
486 #include <stdlib.h>
487 #include <signal.h>
488
/* Signal handler for the configure-time tests below: a protection fault
   reports itself as exit status 1. */
static void fault_handler(int sig)
{
	exit(1);
}
493
/* Tests covered here:
   - TEST_VM_PROT_* program slices actually succeed when a crash occurs
   - TEST_VM_MAP_ANON* program slices succeed when they could be compiled
*/
int main(void)
{
	vm_init();

	// Some slices below are *supposed* to fault; exit(1) from fault_handler
	// is how a successful crash is reported back to configure
	signal(SIGSEGV, fault_handler);
#ifdef SIGBUS
	signal(SIGBUS, fault_handler);
#endif

#define page_align(address) ((char *)((unsigned long)(address) & -page_size))
	unsigned long page_size = vm_get_page_size();

	// Six-page arena; fault_address points 3.5 pages in
	// NOTE(review): vm_acquire() is called with one argument here, so the
	// header presumably declares a default for OPTIONS — verify vm_alloc.h
	const int area_size = 6 * page_size;
	volatile char * area = (volatile char *) vm_acquire(area_size);
	volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
	// Anonymous-mmap probe: acquire and release must both succeed (exit 0)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_release((char *)area, area_size) < 0)
		return 1;

	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
	// If setup fails, exit 0 ("no crash") so the probe reads as unsupported
	if (area == VM_MAP_FAILED)
		return 0;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// Downgrade to read-only, then restore read/write before the final write
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
		return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
	// this should cause a core dump
	char foo = *fault_address;
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
	// this should cause a core dump
	*fault_address = 'z';
	return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// this should not cause a core dump
	*fault_address = 'z';
	return 0;
#endif
}
566 #endif