/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/list.h"
#include "wine/asm.h"

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_PRIVATE
#define MAP_PRIVATE 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif

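/* On platforms that lack MAP_ANON (defined to 0 above), anonymous memory is
 * obtained by mapping /dev/zero instead.  get_fdzero() lazily opens that
 * device once and caches the descriptor; where MAP_ANON exists it simply
 * returns -1, which is what mmap() expects for anonymous mappings. */
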
static inline int get_fdzero(void)
{
    static int fd = -1;

    if (MAP_ANON == 0 && fd == -1)
    {
        if ((fd = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
    return fd;
}

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 *           try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocatable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    char * volatile result = NULL;
    const size_t pagesize = sysconf( _SC_PAGESIZE );
    pid_t pid, wret;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        int i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );
        _exit(1);
    }

    /* reap child */
    do {
        wret = waitpid(pid, NULL, 0);
    } while (wret < 0 && errno == EINTR);

    return result == addr;
}

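/* Illustrative usage (mirrors what wine_anon_mmap() below does on these
 * platforms; "wanted" is a hypothetical preferred base address): try the
 * preferred address first, then fall back to letting the kernel choose:
 *
 *   if (try_mmap_fixed( wanted, size, prot, flags, get_fdzero(), 0 ))
 *       return wanted;
 *   return mmap( wanted, size, prot, flags, get_fdzero(), 0 );
 */
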
#elif defined(__APPLE__)

#include <mach/mach_init.h>
#include <mach/mach_vm.h>

/*
 * On Darwin, we can use the Mach call mach_vm_map to allocate
 * anonymous memory at the specified address and then, if necessary, use
 * mmap with MAP_FIXED to replace the mapping.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    mach_vm_address_t result = (mach_vm_address_t)addr;
    int vm_flags = VM_FLAGS_FIXED;

    if (flags & MAP_NOCACHE)
        vm_flags |= VM_FLAGS_NO_CACHE;
    if (!mach_vm_map( mach_task_self(), &result, len, 0, vm_flags, MEMORY_OBJECT_NULL,
                      0, 0, prot, VM_PROT_ALL, VM_INHERIT_COPY ))
    {
        flags |= MAP_FIXED;
        if (((flags & ~(MAP_NORESERVE | MAP_NOCACHE)) == (MAP_ANON | MAP_FIXED | MAP_PRIVATE)) ||
            mmap( (void *)result, len, prot, flags, fildes, off ) != MAP_FAILED)
            return 1;
        mach_vm_deallocate(mach_task_self(),result,len);
    }
    return 0;
}

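/* Note: with VM_FLAGS_FIXED, mach_vm_map() fails if anything already lives in
 * [addr, addr + len), which gives the same "fixed unless busy" semantics as
 * the vfork()/mincore() variant without forking.  For a plain anonymous
 * private mapping the second mmap() step is skipped, since the Mach
 * allocation is already exactly what was asked for. */
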
#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */


/***********************************************************************
 *           wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
    flags |= MAP_PRIVATE | MAP_ANON;

    if (!(flags & MAP_FIXED))
    {
#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
        if ( start && mmap( start, size, prot, flags | MAP_FIXED | MAP_EXCL, get_fdzero(), 0 ) != MAP_FAILED )
            return start;
#elif defined(__svr4__) || defined(__NetBSD__) || defined(__APPLE__)
        if ( try_mmap_fixed( start, size, prot, flags, get_fdzero(), 0 ) )
            return start;
#endif

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif
    }
    return mmap( start, size, prot, flags, get_fdzero(), 0 );
}

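/* Illustrative usage (addresses here are examples only): a non-NULL start is
 * treated as a hint unless the caller passes MAP_FIXED, so success at the
 * preferred address must be checked by comparing the result:
 *
 *   void *ptr = wine_anon_mmap( (void *)0x400000, 0x10000,
 *                               PROT_READ | PROT_WRITE, 0 );
 *   if (ptr == (void *)0x400000) ...   the hint was honoured
 */
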
#ifdef __ASM_OBSOLETE

struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);
#ifndef __APPLE__
static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */
#endif

void wine_mmap_add_reserved_area_obsolete( void *addr, size_t size );

#ifdef __APPLE__

/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
#ifdef __i386__
    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
#else
    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
#endif
    mach_vm_address_t address = (mach_vm_address_t)addr;
    mach_vm_address_t end_address = (mach_vm_address_t)end;

    if (!end_address || max_address < end_address)
        end_address = max_address;

    while (address < end_address)
    {
        mach_vm_address_t hole_address = address;
        kern_return_t ret;
        mach_vm_size_t size;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t dummy_object_name = MACH_PORT_NULL;

        /* find the mapped region at or above the current address. */
        ret = mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
                             (vm_region_info_t)&info, &count, &dummy_object_name);
        if (ret != KERN_SUCCESS)
        {
            address = max_address;
            size = 0;
        }

        if (end_address < address)
            address = end_address;
        if (hole_address < address)
        {
            /* found a hole, attempt to reserve it. */
            size_t hole_size = address - hole_address;
            mach_vm_address_t alloc_address = hole_address;

            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
            if (!ret)
                wine_mmap_add_reserved_area_obsolete( (void*)hole_address, hole_size );
            else if (ret == KERN_NO_SPACE)
            {
                /* something filled (part of) the hole before we could.
                   go back and look again. */
                address = hole_address;
                continue;
            }
        }
        address += size;
    }
}

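/* The loop above walks the address space with mach_vm_region(): each
 * iteration finds the next mapped region, and the gap between the previous
 * cursor and that region (a "hole") is reserved with mach_vm_map().  When
 * mach_vm_region() reports no further regions, the remainder up to
 * max_address is treated as one final hole. */
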
#else

/***********************************************************************
 *           mmap_reserve
 *
 * mmap wrapper used for reservations, only maps the specified address
 */
static inline int mmap_reserve( void *addr, size_t size )
{
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;

#ifdef MAP_TRYFIXED
    flags |= MAP_TRYFIXED;
#elif defined(__APPLE__)
    return try_mmap_fixed( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
#endif
    ptr = mmap( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
    if (ptr != addr && ptr != (void *)-1) munmap( ptr, size );
    return (ptr == addr);
}

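/* Reservations use PROT_NONE and MAP_NORESERVE: the pages can never be
 * touched and no swap is committed for them, so reserving large ranges is
 * essentially free.  The address space is merely earmarked so that later
 * allocations can claim parts of it with MAP_FIXED. */
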
/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
    size_t size = (char *)end - (char *)addr;

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
    /* try_mmap_fixed is inefficient when using vfork, so we need a different algorithm here */
    /* we assume no other thread is running at this point */
    size_t i, pagesize = sysconf( _SC_PAGESIZE );
    char vec;

    while (size)
    {
        for (i = 0; i < size; i += pagesize)
            if (mincore( (caddr_t)addr + i, pagesize, &vec ) != -1) break;

        i &= ~granularity_mask;
        if (i && mmap( addr, i, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                       get_fdzero(), 0 ) != (void *)-1)
            wine_mmap_add_reserved_area_obsolete( addr, i );

        i += granularity_mask + 1;
        if ((char *)addr + i < (char *)addr) break;  /* overflow */
        addr = (char *)addr + i;
        if (addr >= end) break;
        size = (char *)end - (char *)addr;
    }
#else
    if (!size) return;

    if (mmap_reserve( addr, size ))
    {
        wine_mmap_add_reserved_area_obsolete( addr, size );
        return;
    }
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif
}

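/* The non-svr4 path above is a recursive bisection: if the whole range cannot
 * be reserved in one mmap_reserve() call, it is split in half (rounded down
 * to the 64k granularity) and each half is retried independently, so the free
 * fragments around existing mappings still get reserved. */
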
#endif  /* __APPLE__ */

#ifdef __i386__
/***********************************************************************
 *           reserve_malloc_space
 *
 * Solaris malloc is not smart enough to obtain space through mmap(), so try to make
 * sure that there is some available sbrk() space before we reserve other things.
 */
static inline void reserve_malloc_space( size_t size )
{
#ifdef __sun
    size_t i, count = size / 1024;
    void **ptrs = malloc( count * sizeof(ptrs[0]) );

    if (!ptrs) return;

    for (i = 0; i < count; i++) if (!(ptrs[i] = malloc( 1024 ))) break;
    if (i--)  /* free everything except the last one */
        while (i) free( ptrs[--i] );
    free( ptrs );
#endif
}

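/* Growing the heap in 1K steps and then freeing all but the topmost block
 * leaves the brk at its high-water mark, so the freed space stays available
 * to malloc() even after the surrounding address space is reserved. */
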
/***********************************************************************
 *           reserve_dos_area
 *
 * Reserve the DOS area (0x00000000-0x00110000).
 */
static inline void reserve_dos_area(void)
{
    const size_t first_page = 0x1000;
    const size_t dos_area_size = 0x110000;
    void *ptr;

    /* first page has to be handled specially */
    ptr = wine_anon_mmap( (void *)first_page, dos_area_size - first_page, PROT_NONE, MAP_NORESERVE );
    if (ptr != (void *)first_page)
    {
        if (ptr != (void *)-1) munmap( ptr, dos_area_size - first_page );
        return;
    }
    /* now add first page with MAP_FIXED */
    wine_anon_mmap( NULL, first_page, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
    wine_mmap_add_reserved_area_obsolete( NULL, dos_area_size );
}

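/* Page zero needs MAP_FIXED because a NULL start address is otherwise taken
 * by mmap() as "place it anywhere"; mapping it also usually requires the
 * kernel to permit low mappings (e.g. vm.mmap_min_addr on Linux), which is
 * why it is attempted last, after the rest of the DOS area succeeded. */
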
#endif


/***********************************************************************
 *           mmap_init
 */
void mmap_init(void)
{
#ifdef __i386__
    struct reserved_area *area;
    struct list *ptr;
#ifndef __APPLE__
    char stack;
    char * const stack_ptr = &stack;
#endif
    char *user_space_limit = (char *)0x7ffe0000;

    reserve_malloc_space( 8 * 1024 * 1024 );

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve some space below 2Gb */
        reserve_area( (void *)0x00110000, (void *)0x40000000 );
    }

    /* check for a reserved area starting at the user space limit */
    /* to avoid wasting time trying to allocate it again */
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base > user_space_limit) break;
        if ((char *)area->base + area->size > user_space_limit)
        {
            user_space_limit = (char *)area->base + area->size;
            break;
        }
    }

#ifndef __APPLE__
    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else
#endif
        reserve_area( user_space_limit, 0 );

    /* reserve the DOS area if not already done */
    ptr = list_head( &reserved_areas );
    if (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (!area->base) return;  /* already reserved */
    }
    reserve_dos_area();

#elif defined(__x86_64__) || defined(__aarch64__)

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve the space now */
        reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
        reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
        reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );
    }

#endif
}

/***********************************************************************
 *           wine_mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas.
 * Caller must have made sure the range is not used by anything else.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_add_reserved_area_obsolete( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}

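/* Merging keeps the list sorted and coalesced.  For example (illustrative
 * addresses): with areas [0x100000,0x200000) and [0x300000,0x400000) already
 * listed, adding [0x200000,0x300000) first extends the lower area and then
 * absorbs the upper one, leaving the single area [0x100000,0x400000). */
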
/***********************************************************************
 *           wine_mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas.
 * If 'unmap' is non-zero the range is unmapped too.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_remove_reserved_area_obsolete( void *addr, size_t size, int unmap )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    if (unmap) munmap( area->base, area->size );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    if (unmap) munmap( addr, size );
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}

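/* The four cases above, with A = [area->base, area->base + area->size) and
 * R = [addr, addr + size):
 *
 *   R covers the start of A  -> shrink A from the left
 *   R covers all of A        -> delete A, keep scanning
 *   R is strictly inside A   -> split A into two areas
 *   R covers the end of A    -> shrink A from the right
 */
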
/***********************************************************************
 *           wine_mmap_is_in_reserved_area
 *
 * Check if the specified range is included in a reserved area.
 * Returns 1 if range is fully included, 0 if range is not included
 * at all, and -1 if it is only partially included.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_is_in_reserved_area_obsolete( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}

/***********************************************************************
 *           wine_mmap_enum_reserved_areas
 *
 * Enumerate the list of reserved areas, sorted by addresses.
 * If enum_func returns a non-zero value, enumeration is stopped and the value is returned.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_enum_reserved_areas_obsolete( int (*enum_func)(void *base, size_t size, void *arg), void *arg,
                                            int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}

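/* Illustrative caller (the names here are hypothetical): a callback returning
 * non-zero stops the walk, so a first-fit search can bail out early:
 *
 *   static int find_callback( void *base, size_t size, void *arg )
 *   {
 *       struct search *s = arg;
 *       if (size < s->needed) return 0;
 *       s->result = base;
 *       return 1;
 *   }
 *   wine_mmap_enum_reserved_areas_obsolete( find_callback, &search, 0 );
 */
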
__ASM_OBSOLETE(wine_mmap_add_reserved_area);
__ASM_OBSOLETE(wine_mmap_remove_reserved_area);
__ASM_OBSOLETE(wine_mmap_is_in_reserved_area);
__ASM_OBSOLETE(wine_mmap_enum_reserved_areas);

#endif /* __ASM_OBSOLETE */