root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.32
Committed: 2009-08-11T07:43:46Z by asvitkine
Branch: MAIN
Changes since 1.31: +31 -2 lines
Log Message:
[Michael Schmitt]
SheepShaver includes the C errno string in many error messages. One such case is when it calls the memory allocation routines in Basilisk II's vm_alloc.cpp.

This works when the memory allocation routine uses functions that set errno (such as mmap or malloc). For example, running SheepShaver on a Linux host produces meaningful error messages.

The problem is that when run on an OS X host, the memory allocation uses Mach routines such as vm_allocate, which do not set errno.

So when SheepShaver reported the error, it used a stale value of errno, which happened to be 17 (EEXIST). The result was an extremely misleading error message: "Cannot map RAM: File already exists".
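
The pitfall in miniature (an illustrative snippet, not SheepShaver's actual code): if the failing call never touches errno, whatever value was left behind flows straight into strerror():

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        errno = EEXIST;  /* 17, left behind by some unrelated earlier failure */
        /* ... a Mach routine such as vm_allocate() fails here without touching errno ... */
        fprintf(stderr, "Cannot map RAM: %s\n", strerror(errno));  /* prints the EEXIST message */
        return 0;
    }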

The fix is to change vm_alloc so that it translates Mach return codes into POSIX errno values.

It also initializes errno to 0 at the start of the memory allocation routines, so that no matter which path they take, they won't report a stale value.
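
With both changes in place, a caller that formats errors via strerror(errno) gets a meaningful message on Mach hosts too. A minimal sketch of such a call site, using vm_acquire() and VM_MAP_FAILED from the file below; the alloc_ram() wrapper and message format are illustrative, not SheepShaver's actual code:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "vm_alloc.h"

    static void *alloc_ram(size_t size)
    {
        void *ram = vm_acquire(size, VM_MAP_DEFAULT);
        if (ram == VM_MAP_FAILED) {
            /* errno is now either 0 or a POSIX value translated from
               the Mach return code by vm_error() below, e.g. ENOMEM
               yields "Cannot allocate memory" instead of a stale code. */
            fprintf(stderr, "Cannot map RAM: %s\n", strerror(errno));
            return NULL;
        }
        return ram;
    }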

File Contents

# User Rev Content
1 gbeauche 1.1 /*
2     * vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
3     * (supports mmap, vm_allocate or fallbacks to malloc)
4     *
5 gbeauche 1.27 * Basilisk II (C) 1997-2008 Christian Bauer
6 gbeauche 1.1 *
7     * This program is free software; you can redistribute it and/or modify
8     * it under the terms of the GNU General Public License as published by
9     * the Free Software Foundation; either version 2 of the License, or
10     * (at your option) any later version.
11     *
12     * This program is distributed in the hope that it will be useful,
13     * but WITHOUT ANY WARRANTY; without even the implied warranty of
14     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15     * GNU General Public License for more details.
16     *
17     * You should have received a copy of the GNU General Public License
18     * along with this program; if not, write to the Free Software
19     * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20     */
21    
22     #ifdef HAVE_CONFIG_H
23     #include "config.h"
24     #endif
25    
26 gbeauche 1.13 #ifdef HAVE_FCNTL_H
27     #include <fcntl.h>
28     #endif
29    
30 gbeauche 1.14 #ifdef HAVE_WIN32_VM
31     #define WIN32_LEAN_AND_MEAN /* avoid including junk */
32     #include <windows.h>
33     #endif
34    
35 asvitkine 1.32 #include <errno.h>
36 gbeauche 1.13 #include <stdio.h>
37 gbeauche 1.1 #include <stdlib.h>
38     #include <string.h>
39 gbeauche 1.13 #include <limits.h>
40 gbeauche 1.1 #include "vm_alloc.h"
41    
42     #ifdef HAVE_MACH_VM
43     #ifndef HAVE_MACH_TASK_SELF
44     #ifdef HAVE_TASK_SELF
45     #define mach_task_self task_self
46     #else
47     #error "No task_self(), you lose."
48     #endif
49     #endif
50     #endif
51    
52 gbeauche 1.28 #ifdef HAVE_WIN32_VM
53     /* Windows is either ILP32 or LLP64 */
54     typedef UINT_PTR vm_uintptr_t;
55     #else
56     /* Other systems are sane as they are either ILP32 or LP64 */
57     typedef unsigned long vm_uintptr_t;
58     #endif
59    
60 gbeauche 1.9 /* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
61     because the emulated target is 32-bit and this helps to allocate
62     memory so that branches can be resolved more easily (32-bit
63     displacement to code in .text), on AMD64 for example. */
64 gbeauche 1.31 #if defined(__hpux)
65     #define MAP_32BIT MAP_ADDR32
66     #endif
67 gbeauche 1.9 #ifndef MAP_32BIT
68     #define MAP_32BIT 0
69     #endif
70 gbeauche 1.13 #ifndef MAP_ANON
71     #define MAP_ANON 0
72     #endif
73     #ifndef MAP_ANONYMOUS
74     #define MAP_ANONYMOUS 0
75     #endif
76 gbeauche 1.9
77     #define MAP_EXTRA_FLAGS (MAP_32BIT)
78    
79 gbeauche 1.1 #ifdef HAVE_MMAP_VM
80 gbeauche 1.23 #if (defined(__linux__) && defined(__i386__)) || HAVE_LINKER_SCRIPT
81 gbeauche 1.9 /* Force a reasonable address below 0x80000000 on x86 so that we
82     don't get addresses above that when the program is run on AMD64.
83     NOTE: this is empirically determined on Linux/x86. */
84     #define MAP_BASE 0x10000000
85     #else
86     #define MAP_BASE 0x00000000
87     #endif
88     static char * next_address = (char *)MAP_BASE;
89 gbeauche 1.1 #ifdef HAVE_MMAP_ANON
90 gbeauche 1.10 #define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
91 gbeauche 1.1 #define zero_fd -1
92     #else
93     #ifdef HAVE_MMAP_ANONYMOUS
94 gbeauche 1.10 #define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
95 gbeauche 1.1 #define zero_fd -1
96     #else
97 gbeauche 1.10 #define map_flags (MAP_EXTRA_FLAGS)
98 gbeauche 1.1 static int zero_fd = -1;
99     #endif
100     #endif
101     #endif
102    
103 gbeauche 1.10 /* Translate generic VM map flags to host values. */
104    
105     #ifdef HAVE_MMAP_VM
106     static int translate_map_flags(int vm_flags)
107     {
108     int flags = 0;
109     if (vm_flags & VM_MAP_SHARED)
110     flags |= MAP_SHARED;
111     if (vm_flags & VM_MAP_PRIVATE)
112     flags |= MAP_PRIVATE;
113     if (vm_flags & VM_MAP_FIXED)
114     flags |= MAP_FIXED;
115     if (vm_flags & VM_MAP_32BIT)
116     flags |= MAP_32BIT;
117     return flags;
118     }
119     #endif
120    
121 gbeauche 1.14 /* Align ADDR and SIZE to 64K boundaries. */
122    
123     #ifdef HAVE_WIN32_VM
124     static inline LPVOID align_addr_segment(LPVOID addr)
125     {
126 gbeauche 1.28 return (LPVOID)(((vm_uintptr_t)addr) & -((vm_uintptr_t)65536));
127 gbeauche 1.14 }
128    
129     static inline DWORD align_size_segment(LPVOID addr, DWORD size)
130     {
131 gbeauche 1.28 return size + ((vm_uintptr_t)addr - (vm_uintptr_t)align_addr_segment(addr));
132 gbeauche 1.14 }
133     #endif
134    
135     /* Translate generic VM prot flags to host values. */
136    
137     #ifdef HAVE_WIN32_VM
138     static int translate_prot_flags(int prot_flags)
139     {
140     int prot = PAGE_READWRITE;
141     if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
142     prot = PAGE_EXECUTE_READWRITE;
143     else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
144     prot = PAGE_EXECUTE_READ;
145     else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
146     prot = PAGE_READWRITE;
147     else if (prot_flags == VM_PAGE_READ)
148     prot = PAGE_READONLY;
149     else if (prot_flags == 0)
150     prot = PAGE_NOACCESS;
151     return prot;
152     }
153     #endif
154    
155 asvitkine 1.32 /* Translate Mach return codes to POSIX errno values. */
156     #ifdef HAVE_MACH_VM
157     static int vm_error(kern_return_t ret_code)
158     {
159     switch (ret_code) {
160     case KERN_SUCCESS:
161     return 0;
162     case KERN_INVALID_ADDRESS:
163     case KERN_NO_SPACE:
164     return ENOMEM;
165     case KERN_PROTECTION_FAILURE:
166     return EACCES;
167     default:
168     return EINVAL;
169     }
170     }
171     #endif
172    
173 gbeauche 1.1 /* Initialize the VM system. Returns 0 if successful, -1 for errors. */
174    
175     int vm_init(void)
176     {
177     #ifdef HAVE_MMAP_VM
178     #ifndef zero_fd
179     zero_fd = open("/dev/zero", O_RDWR);
180     if (zero_fd < 0)
181     return -1;
182     #endif
183     #endif
184     return 0;
185     }
186    
187     /* Deallocate all internal data used to wrap virtual memory allocators. */
188    
189     void vm_exit(void)
190     {
191     #ifdef HAVE_MMAP_VM
192     #ifndef zero_fd
193 gbeauche 1.19 if (zero_fd != -1) {
194     close(zero_fd);
195     zero_fd = -1;
196     }
197 gbeauche 1.1 #endif
198     #endif
199     }
200    
201     /* Allocate zero-filled memory of SIZE bytes. The mapping is private
202     and default protection bits are read / write. The return value
203     is the actual mapping address chosen or VM_MAP_FAILED for errors. */
204    
205 gbeauche 1.10 void * vm_acquire(size_t size, int options)
206 gbeauche 1.1 {
207     void * addr;
208 asvitkine 1.32
209     errno = 0;
210 gbeauche 1.10
211     // VM_MAP_FIXED is to be used with vm_acquire_fixed() only
212     if (options & VM_MAP_FIXED)
213     return VM_MAP_FAILED;
214    
215 gbeauche 1.29 #ifndef HAVE_VM_WRITE_WATCH
216     if (options & VM_MAP_WRITE_WATCH)
217     return VM_MAP_FAILED;
218     #endif
219    
220 gbeauche 1.1 #ifdef HAVE_MACH_VM
221     // vm_allocate() returns a zero-filled memory region
222 asvitkine 1.32 kern_return_t ret_code = vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE);
223     if (ret_code != KERN_SUCCESS) {
224     errno = vm_error(ret_code);
225 gbeauche 1.1 return VM_MAP_FAILED;
226 asvitkine 1.32 }
227 gbeauche 1.1 #else
228     #ifdef HAVE_MMAP_VM
229 gbeauche 1.13 int fd = zero_fd;
230     int the_map_flags = translate_map_flags(options) | map_flags;
231    
232     if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
233 gbeauche 1.1 return VM_MAP_FAILED;
234    
235 gbeauche 1.10 // Sanity checks for 64-bit platforms
236     if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
237     return VM_MAP_FAILED;
238    
239 gbeauche 1.3 next_address = (char *)addr + size;
240 gbeauche 1.1 #else
241 gbeauche 1.14 #ifdef HAVE_WIN32_VM
242 gbeauche 1.29 int alloc_type = MEM_RESERVE | MEM_COMMIT;
243     if (options & VM_MAP_WRITE_WATCH)
244     alloc_type |= MEM_WRITE_WATCH;
245    
246     if ((addr = VirtualAlloc(NULL, size, alloc_type, PAGE_EXECUTE_READWRITE)) == NULL)
247 gbeauche 1.14 return VM_MAP_FAILED;
248     #else
249 gbeauche 1.1 if ((addr = calloc(size, 1)) == 0)
250     return VM_MAP_FAILED;
251    
252     // Omit changes for protections because they are not supported in this mode
253     return addr;
254     #endif
255     #endif
256 gbeauche 1.14 #endif
257 cebix 1.2
258 gbeauche 1.1 // Explicitly protect the newly mapped region here because on some systems,
259     // say MacOS X, mmap() doesn't honour the requested protection flags.
260     if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
261     return VM_MAP_FAILED;
262    
263     return addr;
264     }
265    
266     /* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
267     Returns 0 if successful, -1 on errors. */
268    
269 gbeauche 1.10 int vm_acquire_fixed(void * addr, size_t size, int options)
270 gbeauche 1.1 {
271 asvitkine 1.32 errno = 0;
272    
273 gbeauche 1.10 // Fixed mappings are required to be private
274     if (options & VM_MAP_SHARED)
275     return -1;
276    
277 gbeauche 1.29 #ifndef HAVE_VM_WRITE_WATCH
278     if (options & VM_MAP_WRITE_WATCH)
279     return -1;
280     #endif
281    
282 gbeauche 1.1 #ifdef HAVE_MACH_VM
283     // vm_allocate() returns a zero-filled memory region
284 asvitkine 1.32 kern_return_t ret_code = vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0);
285     if (ret_code != KERN_SUCCESS) {
286     errno = vm_error(ret_code);
287 gbeauche 1.1 return -1;
288 asvitkine 1.32 }
289 gbeauche 1.1 #else
290     #ifdef HAVE_MMAP_VM
291 gbeauche 1.21 int fd = zero_fd;
292     int the_map_flags = translate_map_flags(options) | map_flags | MAP_FIXED;
293 gbeauche 1.10
294 gbeauche 1.21 if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
295 gbeauche 1.1 return -1;
296     #else
297 gbeauche 1.14 #ifdef HAVE_WIN32_VM
298     // Windows cannot allocate Low Memory
299     if (addr == NULL)
300     return -1;
301    
302 gbeauche 1.29 int alloc_type = MEM_RESERVE | MEM_COMMIT;
303     if (options & VM_MAP_WRITE_WATCH)
304     alloc_type |= MEM_WRITE_WATCH;
305    
306 gbeauche 1.14 // Allocate a possibly offset region to align on 64K boundaries
307     LPVOID req_addr = align_addr_segment(addr);
308     DWORD req_size = align_size_segment(addr, size);
309 gbeauche 1.29 LPVOID ret_addr = VirtualAlloc(req_addr, req_size, alloc_type, PAGE_EXECUTE_READWRITE);
310 gbeauche 1.14 if (ret_addr != req_addr)
311     return -1;
312     #else
313 gbeauche 1.1 // Unsupported
314     return -1;
315     #endif
316     #endif
317 gbeauche 1.14 #endif
318 gbeauche 1.1
319     // Explicitly protect the newly mapped region here because on some systems,
320     // say MacOS X, mmap() doesn't honour the requested protection flags.
321 gbeauche 1.6 if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
322 gbeauche 1.1 return -1;
323    
324     return 0;
325     }
326    
327     /* Deallocate any mapping for the region starting at ADDR and extending
328     LEN bytes. Returns 0 if successful, -1 on errors. */
329    
330     int vm_release(void * addr, size_t size)
331     {
332 gbeauche 1.3 // Safety check: don't try to release memory that was not allocated
333     if (addr == VM_MAP_FAILED)
334     return 0;
335    
336 gbeauche 1.1 #ifdef HAVE_MACH_VM
337 gbeauche 1.4 if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
338     return -1;
339 gbeauche 1.1 #else
340     #ifdef HAVE_MMAP_VM
341 gbeauche 1.7 if (munmap((caddr_t)addr, size) != 0)
342 gbeauche 1.4 return -1;
343 gbeauche 1.1 #else
344 gbeauche 1.14 #ifdef HAVE_WIN32_VM
345     if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
346     return -1;
347     #else
348 gbeauche 1.1 free(addr);
349     #endif
350     #endif
351 gbeauche 1.14 #endif
352 gbeauche 1.4
353     return 0;
354 gbeauche 1.1 }
355    
356     /* Change the memory protection of the region starting at ADDR and
357     extending LEN bytes to PROT. Returns 0 if successful, -1 for errors. */
358    
359     int vm_protect(void * addr, size_t size, int prot)
360     {
361     #ifdef HAVE_MACH_VM
362     int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
363     return ret_code == KERN_SUCCESS ? 0 : -1;
364     #else
365     #ifdef HAVE_MMAP_VM
366 gbeauche 1.7 int ret_code = mprotect((caddr_t)addr, size, prot);
367 gbeauche 1.1 return ret_code == 0 ? 0 : -1;
368     #else
369 gbeauche 1.14 #ifdef HAVE_WIN32_VM
370     DWORD old_prot;
371     int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
372     return ret_code != 0 ? 0 : -1;
373     #else
374 gbeauche 1.1 // Unsupported
375     return -1;
376     #endif
377     #endif
378 gbeauche 1.14 #endif
379 gbeauche 1.1 }
380    
381 gbeauche 1.29 /* Return the addresses of the pages that got modified in the
382     specified range [ ADDR, ADDR + SIZE [ since the last reset of the watch
383     bits. Returns 0 if successful, -1 for errors. */
384    
385     int vm_get_write_watch(void * addr, size_t size,
386     void ** pages, unsigned int * n_pages,
387     int options)
388     {
389     #ifdef HAVE_VM_WRITE_WATCH
390     #ifdef HAVE_WIN32_VM
391     DWORD flags = 0;
392     if (options & VM_WRITE_WATCH_RESET)
393     flags |= WRITE_WATCH_FLAG_RESET;
394    
395     ULONG page_size;
396 gbeauche 1.30 ULONG_PTR count = *n_pages;
397 gbeauche 1.29 int ret_code = GetWriteWatch(flags, addr, size, pages, &count, &page_size);
398     if (ret_code != 0)
399     return -1;
400    
401     *n_pages = count;
402     return 0;
403     #endif
404     #endif
405     // Unsupported
406     return -1;
407     }
408    
409     /* Reset the write-tracking state for the specified range [ ADDR, ADDR
410     + SIZE [. Returns 0 if successful, -1 for errors. */
411    
412     int vm_reset_write_watch(void * addr, size_t size)
413     {
414     #ifdef HAVE_VM_WRITE_WATCH
415     #ifdef HAVE_WIN32_VM
416     int ret_code = ResetWriteWatch(addr, size);
417     return ret_code == 0 ? 0 : -1;
418     #endif
419     #endif
420     // Unsupported
421     return -1;
422     }
423    
424 gbeauche 1.15 /* Returns the size of a page. */
425    
426 gbeauche 1.16 int vm_get_page_size(void)
427 gbeauche 1.15 {
428 gbeauche 1.20 #ifdef HAVE_WIN32_VM
429 gbeauche 1.28 static vm_uintptr_t page_size = 0;
430 gbeauche 1.20 if (page_size == 0) {
431     SYSTEM_INFO si;
432     GetSystemInfo(&si);
433     page_size = si.dwAllocationGranularity;
434     }
435     return page_size;
436 gbeauche 1.15 #else
437 gbeauche 1.20 return getpagesize();
438 gbeauche 1.15 #endif
439     }
440    
441 gbeauche 1.29 #ifdef CONFIGURE_TEST_VM_WRITE_WATCH
442     int main(void)
443     {
444     int i, j;
445    
446     vm_init();
447    
448     vm_uintptr_t page_size = vm_get_page_size();
449    
450     char *area;
451     const int n_pages = 7;
452     const int area_size = n_pages * page_size;
453     const int map_options = VM_MAP_DEFAULT | VM_MAP_WRITE_WATCH;
454     if ((area = (char *)vm_acquire(area_size, map_options)) == VM_MAP_FAILED)
455     return 1;
456    
457     unsigned int n_modified_pages_expected = 0;
458     static const int touch_page[n_pages] = { 0, 1, 1, 0, 1, 0, 1 };
459     for (i = 0; i < n_pages; i++) {
460     if (touch_page[i]) {
461     area[i * page_size] = 1;
462     ++n_modified_pages_expected;
463     }
464     }
465    
466     char *modified_pages[n_pages];
467     unsigned int n_modified_pages = n_pages;
468     if (vm_get_write_watch(area, area_size, (void **)modified_pages, &n_modified_pages) < 0)
469     return 2;
470     if (n_modified_pages != n_modified_pages_expected)
471     return 3;
472     for (i = 0, j = 0; i < n_pages; i++) {
473     char v = area[i * page_size];
474     if ((touch_page[i] && !v) || (!touch_page[i] && v))
475     return 4;
476     if (!touch_page[i])
477     continue;
478     if (modified_pages[j] != (area + i * page_size))
479     return 5;
480     ++j;
481     }
482    
483     vm_release(area, area_size);
484     return 0;
485     }
486     #endif
487    
488 gbeauche 1.1 #ifdef CONFIGURE_TEST_VM_MAP
489 gbeauche 1.18 #include <stdlib.h>
490     #include <signal.h>
491    
492     static void fault_handler(int sig)
493     {
494     exit(1);
495     }
496    
497 gbeauche 1.1 /* Tests covered here:
498     - TEST_VM_PROT_* program slices actually succeed when a crash occurs
499     - TEST_VM_MAP_ANON* program slices succeed when they can be compiled
500     */
501     int main(void)
502     {
503     vm_init();
504 gbeauche 1.18
505     signal(SIGSEGV, fault_handler);
506     #ifdef SIGBUS
507     signal(SIGBUS, fault_handler);
508     #endif
509 gbeauche 1.1
510 gbeauche 1.28 #define page_align(address) ((char *)((vm_uintptr_t)(address) & -page_size))
511     vm_uintptr_t page_size = vm_get_page_size();
512 gbeauche 1.1
513     const int area_size = 6 * page_size;
514     volatile char * area = (volatile char *) vm_acquire(area_size);
515     volatile char * fault_address = area + (page_size * 7) / 2;
516    
517     #if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
518     if (area == VM_MAP_FAILED)
519     return 1;
520    
521     if (vm_release((char *)area, area_size) < 0)
522     return 1;
523    
524     return 0;
525     #endif
526    
527     #if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
528     if (area == VM_MAP_FAILED)
529     return 0;
530    
531     if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
532     return 0;
533     #endif
534    
535     #if defined(TEST_VM_PROT_RDWR_WRITE)
536     if (area == VM_MAP_FAILED)
537     return 1;
538    
539     if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
540     return 1;
541    
542     if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
543     return 1;
544     #endif
545    
546     #if defined(TEST_VM_PROT_READ_WRITE)
547     if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
548     return 0;
549     #endif
550    
551     #if defined(TEST_VM_PROT_NONE_READ)
552     // this should cause a core dump
553     char foo = *fault_address;
554     return 0;
555     #endif
556    
557     #if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
558     // this should cause a core dump
559     *fault_address = 'z';
560     return 0;
561     #endif
562    
563     #if defined(TEST_VM_PROT_RDWR_WRITE)
564     // this should not cause a core dump
565     *fault_address = 'z';
566     return 0;
567     #endif
568     }
569     #endif