  1. /*
  2. * Server-side file mapping management
  3. *
  4. * Copyright (C) 1999 Alexandre Julliard
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
  19. */
  20. #include "config.h"
  21. #include "wine/port.h"
  22. #include <assert.h>
  23. #include <stdarg.h>
  24. #include <stdio.h>
  25. #include <stdlib.h>
  26. #include <sys/stat.h>
  27. #ifdef HAVE_SYS_MMAN_H
  28. # include <sys/mman.h>
  29. #endif
  30. #ifdef HAVE_SYS_SYSCALL_H
  31. # include <sys/syscall.h>
  32. #endif
  33. #include <unistd.h>
  34. #if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
  35. /* __NR_memfd_create might not yet be available when buildservers use an old kernel */
  36. #ifndef __NR_memfd_create
  37. #ifdef __x86_64__
  38. #define __NR_memfd_create 319
  39. #else
  40. #define __NR_memfd_create 356
  41. #endif
  42. #endif
  43. /* the following declarations are only available in linux/fcntl.h, but not fcntl.h */
  44. #define F_LINUX_SPECIFIC_BASE 1024
  45. #define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
  46. #define MFD_ALLOW_SEALING 0x0002U
  47. #define F_SEAL_SEAL 0x0001
  48. #define F_SEAL_SHRINK 0x0002
  49. #define F_SEAL_GROW 0x0004
  50. #endif
  51. #include "ntstatus.h"
  52. #define WIN32_NO_STATUS
  53. #include "windef.h"
  54. #include "winternl.h"
  55. #include "file.h"
  56. #include "handle.h"
  57. #include "thread.h"
  58. #include "process.h"
  59. #include "request.h"
  60. #include "security.h"
/* list of memory ranges, used to store committed info */
struct ranges
{
    struct object obj;    /* object header */
    unsigned int  count;  /* number of used ranges */
    unsigned int  max;    /* number of allocated ranges */
    struct range
    {
        file_pos_t start;  /* start offset of the range (in mapping coordinates) */
        file_pos_t end;    /* end offset of the range */
    } *ranges;             /* array kept sorted by start (see add_committed_range) */
};
static void ranges_dump( struct object *obj, int verbose );
static void ranges_destroy( struct object *obj );

/* object operations for committed-ranges lists; plain refcounted data,
 * so almost every operation is the generic "not supported" stub */
static const struct object_ops ranges_ops =
{
    sizeof(struct ranges),     /* size */
    ranges_dump,               /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* get_esync_fd */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    no_map_access,             /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_alloc_handle,           /* alloc_handle */
    no_close_handle,           /* close_handle */
    ranges_destroy             /* destroy */
};
/* file backing the shared sections of a PE image mapping */
struct shared_map
{
    struct object obj;    /* object header */
    struct fd    *fd;     /* file descriptor of the mapped PE file */
    struct file  *file;   /* temp file holding the shared data */
    struct list   entry;  /* entry in global shared maps list */
};
static void shared_map_dump( struct object *obj, int verbose );
static void shared_map_destroy( struct object *obj );

/* object operations for shared PE section temp files */
static const struct object_ops shared_map_ops =
{
    sizeof(struct shared_map), /* size */
    shared_map_dump,           /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* get_esync_fd */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    no_map_access,             /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_alloc_handle,           /* alloc_handle */
    no_close_handle,           /* close_handle */
    shared_map_destroy         /* destroy */
};

/* global list of shared_map objects, searched by get_shared_file() */
static struct list shared_map_list = LIST_INIT( shared_map_list );
/* memory view mapped in client address space */
struct memory_view
{
    struct list        entry;      /* entry in per-process view list */
    struct fd         *fd;         /* fd for mapped file */
    struct ranges     *committed;  /* list of committed ranges in this mapping (NULL means fully committed) */
    struct shared_map *shared;     /* temp file for shared PE mapping */
    unsigned int       flags;      /* SEC_* flags */
    client_ptr_t       base;       /* view base address (in process addr space) */
    mem_size_t         size;       /* view size */
    file_pos_t         start;      /* start offset in mapping */
};
/* server-side section (file mapping) object */
struct mapping
{
    struct object      obj;        /* object header */
    mem_size_t         size;       /* mapping size */
    unsigned int       flags;      /* SEC_* flags */
    struct fd         *fd;         /* fd for mapped file */
    pe_image_info_t    image;      /* image info (for PE image mapping) */
    struct ranges     *committed;  /* list of committed ranges in this mapping */
    struct shared_map *shared;     /* temp file for shared PE mapping */
};
static void mapping_dump( struct object *obj, int verbose );
static struct object_type *mapping_get_type( struct object *obj );
static struct fd *mapping_get_fd( struct object *obj );
static unsigned int mapping_map_access( struct object *obj, unsigned int access );
static void mapping_destroy( struct object *obj );
static enum server_fd_type mapping_get_fd_type( struct fd *fd );

/* object operations for section objects; mappings are namable
 * (directory_link_name) and expose their backing fd */
static const struct object_ops mapping_ops =
{
    sizeof(struct mapping),      /* size */
    mapping_dump,                /* dump */
    mapping_get_type,            /* get_type */
    no_add_queue,                /* add_queue */
    NULL,                        /* remove_queue */
    NULL,                        /* signaled */
    NULL,                        /* get_esync_fd */
    NULL,                        /* satisfied */
    no_signal,                   /* signal */
    mapping_get_fd,              /* get_fd */
    mapping_map_access,          /* map_access */
    default_get_sd,              /* get_sd */
    default_set_sd,              /* set_sd */
    no_lookup_name,              /* lookup_name */
    directory_link_name,         /* link_name */
    default_unlink_name,         /* unlink_name */
    no_open_file,                /* open_file */
    no_kernel_obj_list,          /* get_kernel_obj_list */
    no_alloc_handle,             /* alloc_handle */
    fd_close_handle,             /* close_handle */
    mapping_destroy              /* destroy */
};
/* fd operations for a mapping's fd: no direct read/write/ioctl is allowed */
static const struct fd_ops mapping_fd_ops =
{
    default_fd_get_poll_events,  /* get_poll_events */
    default_poll_event,          /* poll_event */
    mapping_get_fd_type,         /* get_fd_type */
    no_fd_read,                  /* read */
    no_fd_write,                 /* write */
    no_fd_flush,                 /* flush */
    no_fd_get_file_info,         /* get_file_info */
    no_fd_get_volume_info,       /* get_volume_info */
    no_fd_ioctl,                 /* ioctl */
    no_fd_queue_async,           /* queue_async */
    default_fd_reselect_async    /* reselect_async */
};
/* page size minus one, used for alignment checks and rounding;
 * NOTE(review): not initialized anywhere in this part of the file --
 * confirm it is set before ROUND_SIZE() or the page checks run */
static size_t page_mask;

/* global shared memory */
shmglobal_t *shmglobal;      /* block shared with clients (NULL if allocation failed) */
int shmglobal_fd;            /* fd backing shmglobal (-1 if allocation failed) */

/* round a size up to the next page boundary */
#define ROUND_SIZE(size) (((size) + page_mask) & ~page_mask)
  205. static void ranges_dump( struct object *obj, int verbose )
  206. {
  207. struct ranges *ranges = (struct ranges *)obj;
  208. fprintf( stderr, "Memory ranges count=%u\n", ranges->count );
  209. }
  210. static void ranges_destroy( struct object *obj )
  211. {
  212. struct ranges *ranges = (struct ranges *)obj;
  213. free( ranges->ranges );
  214. }
  215. static void shared_map_dump( struct object *obj, int verbose )
  216. {
  217. struct shared_map *shared = (struct shared_map *)obj;
  218. fprintf( stderr, "Shared mapping fd=%p file=%p\n", shared->fd, shared->file );
  219. }
  220. static void shared_map_destroy( struct object *obj )
  221. {
  222. struct shared_map *shared = (struct shared_map *)obj;
  223. release_object( shared->fd );
  224. release_object( shared->file );
  225. list_remove( &shared->entry );
  226. }
  227. /* extend a file beyond the current end of file */
  228. static int grow_file( int unix_fd, file_pos_t new_size )
  229. {
  230. static const char zero;
  231. off_t size = new_size;
  232. if (sizeof(new_size) > sizeof(size) && size != new_size)
  233. {
  234. set_error( STATUS_INVALID_PARAMETER );
  235. return 0;
  236. }
  237. /* extend the file one byte beyond the requested size and then truncate it */
  238. /* this should work around ftruncate implementations that can't extend files */
  239. if (pwrite( unix_fd, &zero, 1, size ) != -1)
  240. {
  241. ftruncate( unix_fd, size );
  242. return 1;
  243. }
  244. file_set_error();
  245. return 0;
  246. }
  247. /* check if the current directory allows exec mappings */
  248. static int check_current_dir_for_exec(void)
  249. {
  250. int fd;
  251. char tmpfn[] = "anonmap.XXXXXX";
  252. void *ret = MAP_FAILED;
  253. fd = mkstemps( tmpfn, 0 );
  254. if (fd == -1) return 0;
  255. if (grow_file( fd, 1 ))
  256. {
  257. ret = mmap( NULL, get_page_size(), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0 );
  258. if (ret != MAP_FAILED) munmap( ret, get_page_size() );
  259. }
  260. close( fd );
  261. unlink( tmpfn );
  262. return (ret != MAP_FAILED);
  263. }
/* allocates a block of shared memory; returns 1 on success with *fd and
 * *memory filled in, 0 on failure (then *fd is -1 and *memory is NULL);
 * only implemented on Linux x86/x86_64 via memfd_create */
int allocate_shared_memory( int *fd, void **memory, size_t size )
{
#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
    void *shm_mem;
    int shm_fd;

    /* anonymous in-memory file that supports sealing */
    shm_fd = syscall( __NR_memfd_create, "wineserver_shm", MFD_ALLOW_SEALING );
    if (shm_fd == -1) goto err;
    if (grow_file( shm_fd, size ))
    {
        /* seal the size so clients sharing the fd cannot shrink/grow it */
        if (fcntl( shm_fd, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_SHRINK | F_SEAL_GROW ) >= 0)
        {
            shm_mem = mmap( 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0 );
            if (shm_mem != MAP_FAILED)
            {
                memset( shm_mem, 0, size );  /* hand out zeroed memory */
                *fd = shm_fd;
                *memory = shm_mem;
                return 1;
            }
        }
    }
    close( shm_fd );
err:
#endif
    *memory = NULL;
    *fd = -1;
    return 0;
}
  293. /* releases a block of shared memory */
  294. void release_shared_memory( int fd, void *memory, size_t size )
  295. {
  296. #if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
  297. if (memory) munmap( memory, size );
  298. if (fd != -1) close( fd );
  299. #endif
  300. }
/* initialize shared memory management: set up the global shared block
 * (on failure shmglobal is NULL and shmglobal_fd is -1) */
void init_shared_memory( void )
{
    allocate_shared_memory( &shmglobal_fd, (void **)&shmglobal, sizeof(*shmglobal) );
}
/* create a temp file for anonymous mappings; returns the unix fd or -1.
 * The directory is chosen once: the server dir if it allows exec
 * mappings, otherwise the config dir; the file is unlinked immediately,
 * so the fd is the only reference to it. */
static int create_temp_file( file_pos_t size )
{
    static int temp_dir_fd = -1;  /* cached choice of temp directory */
    char tmpfn[] = "anonmap.XXXXXX";
    int fd;

    if (temp_dir_fd == -1)
    {
        temp_dir_fd = server_dir_fd;
        if (!check_current_dir_for_exec())
        {
            /* the server dir is noexec, try the config dir instead */
            fchdir( config_dir_fd );
            if (check_current_dir_for_exec())
                temp_dir_fd = config_dir_fd;
            else /* neither works, fall back to server dir */
                fchdir( server_dir_fd );
        }
    }
    else if (temp_dir_fd != server_dir_fd) fchdir( temp_dir_fd );
    fd = mkstemps( tmpfn, 0 );
    if (fd != -1)
    {
        if (!grow_file( fd, size ))
        {
            close( fd );
            fd = -1;
        }
        unlink( tmpfn );  /* the open fd keeps the file alive */
    }
    else file_set_error();
    /* restore the server dir as current directory */
    if (temp_dir_fd != server_dir_fd) fchdir( server_dir_fd );
    return fd;
}
  340. /* find a memory view from its base address */
  341. static struct memory_view *find_mapped_view( struct process *process, client_ptr_t base )
  342. {
  343. struct memory_view *view;
  344. LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
  345. if (view->base == base) return view;
  346. set_error( STATUS_NOT_MAPPED_VIEW );
  347. return NULL;
  348. }
  349. static void free_memory_view( struct memory_view *view )
  350. {
  351. if (view->fd) release_object( view->fd );
  352. if (view->committed) release_object( view->committed );
  353. if (view->shared) release_object( view->shared );
  354. list_remove( &view->entry );
  355. free( view );
  356. }
  357. /* free all mapped views at process exit */
  358. void free_mapped_views( struct process *process )
  359. {
  360. struct list *ptr;
  361. while ((ptr = list_head( &process->views )))
  362. free_memory_view( LIST_ENTRY( ptr, struct memory_view, entry ));
  363. }
  364. /* find the shared PE mapping for a given mapping */
  365. static struct shared_map *get_shared_file( struct fd *fd )
  366. {
  367. struct shared_map *ptr;
  368. LIST_FOR_EACH_ENTRY( ptr, &shared_map_list, struct shared_map, entry )
  369. if (is_same_file_fd( ptr->fd, fd ))
  370. return (struct shared_map *)grab_object( ptr );
  371. return NULL;
  372. }
  373. /* return the size of the memory mapping and file range of a given section */
  374. static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *map_size,
  375. off_t *file_start, size_t *file_size )
  376. {
  377. static const unsigned int sector_align = 0x1ff;
  378. if (!sec->Misc.VirtualSize) *map_size = ROUND_SIZE( sec->SizeOfRawData );
  379. else *map_size = ROUND_SIZE( sec->Misc.VirtualSize );
  380. *file_start = sec->PointerToRawData & ~sector_align;
  381. *file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
  382. if (*file_size > *map_size) *file_size = *map_size;
  383. }
/* add a range to the committed list; start/end are view-relative offsets
 * and must be page-aligned, inside the view, with start < end */
static void add_committed_range( struct memory_view *view, file_pos_t start, file_pos_t end )
{
    unsigned int i, j;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || (end & page_mask) ||
        start >= view->size || end >= view->size ||
        start >= end)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }
    if (!committed) return;  /* everything committed already */

    /* convert to mapping-relative offsets */
    start += view->start;
    end += view->start;

    /* the array is sorted by start; look for a range to merge with */
    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > end) break;     /* new range goes before this one */
        if (ranges[i].end < start) continue;  /* disjoint, keep scanning */
        if (ranges[i].start > start) ranges[i].start = start; /* extend downwards */
        if (ranges[i].end < end) /* extend upwards and maybe merge with next */
        {
            /* absorb any following ranges covered by the extension */
            for (j = i + 1; j < committed->count; j++)
            {
                if (ranges[j].start > end) break;
                if (ranges[j].end > end) end = ranges[j].end;
            }
            if (j > i + 1)
            {
                /* close the gap left by the absorbed ranges */
                memmove( &ranges[i + 1], &ranges[j], (committed->count - j) * sizeof(*ranges) );
                committed->count -= j - (i + 1);
            }
            ranges[i].end = end;
        }
        return;
    }
    /* now add a new range */
    if (committed->count == committed->max)
    {
        unsigned int new_size = committed->max * 2;
        struct range *new_ptr = realloc( committed->ranges, new_size * sizeof(*new_ptr) );
        if (!new_ptr) return;  /* on OOM the range is silently dropped */
        committed->max = new_size;
        ranges = committed->ranges = new_ptr;
    }
    /* shift the tail up and insert at position i to keep the array sorted */
    memmove( &ranges[i + 1], &ranges[i], (committed->count - i) * sizeof(*ranges) );
    ranges[i].start = start;
    ranges[i].end = end;
    committed->count++;
}
/* find the range containing start and return whether it's committed;
 * start is a view-relative, page-aligned offset; *size receives the
 * length of the contiguous committed (return 1) or uncommitted
 * (return 0) region beginning at start */
static int find_committed_range( struct memory_view *view, file_pos_t start, mem_size_t *size )
{
    unsigned int i;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || start >= view->size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    if (!committed) /* everything is committed */
    {
        *size = view->size - start;
        return 1;
    }
    /* ranges are sorted by start (maintained by add_committed_range) */
    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > view->start + start)
        {
            /* uncommitted gap up to the next range (clamped to the view) */
            *size = min( ranges[i].start, view->start + view->size ) - (view->start + start);
            return 0;
        }
        if (ranges[i].end > view->start + start)
        {
            /* start lies inside this committed range */
            *size = min( ranges[i].end, view->start + view->size ) - (view->start + start);
            return 1;
        }
    }
    /* past the last range: the rest of the view is uncommitted */
    *size = view->size - start;
    return 0;
}
/* allocate and fill the temp file for a shared PE image mapping;
 * returns 1 on success (mapping->shared set, or nothing to do),
 * 0 on failure; fd is the unix fd of the PE file being mapped */
static int build_shared_mapping( struct mapping *mapping, int fd,
                                 IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    struct shared_map *shared;
    struct file *file;
    unsigned int i;
    mem_size_t total_size;
    size_t file_size, map_size, max_size;
    off_t shared_pos, read_pos, write_pos;
    char *buffer = NULL;
    int shared_fd;
    long toread;

    /* compute the total size of the shared mapping */
    total_size = max_size = 0;
    for (i = 0; i < nb_sec; i++)
    {
        /* only writable shared sections need backing in the temp file */
        if ((sec[i].Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
            if (file_size > max_size) max_size = file_size;
            total_size += map_size;
        }
    }
    if (!total_size) return 1; /* nothing to do */

    /* reuse the existing temp file if this PE file is already shared-mapped */
    if ((mapping->shared = get_shared_file( mapping->fd ))) return 1;

    /* create a temp file for the mapping */
    if ((shared_fd = create_temp_file( total_size )) == -1) return 0;
    if (!(file = create_file_for_fd( shared_fd, FILE_GENERIC_READ|FILE_GENERIC_WRITE, 0 ))) return 0;

    if (!(buffer = malloc( max_size ))) goto error;

    /* copy the shared sections data into the temp file */
    shared_pos = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_SHARED)) continue;
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_WRITE)) continue;
        get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
        write_pos = shared_pos;
        shared_pos += map_size;  /* sections are laid out back to back */
        if (!sec[i].PointerToRawData || !file_size) continue;
        toread = file_size;
        while (toread)
        {
            long res = pread( fd, buffer + file_size - toread, toread, read_pos );
            if (!res && toread < 0x200) /* partial sector at EOF is not an error */
            {
                file_size -= toread;
                break;
            }
            if (res <= 0) goto error;
            toread -= res;
            read_pos += res;
        }
        if (pwrite( shared_fd, buffer, file_size, write_pos ) != file_size) goto error;
    }
    if (!(shared = alloc_object( &shared_map_ops ))) goto error;
    shared->fd = (struct fd *)grab_object( mapping->fd );
    shared->file = file;
    list_add_head( &shared_map_list, &shared->entry );
    mapping->shared = shared;
    free( buffer );
    return 1;

error:
    release_object( file );
    free( buffer );
    return 0;
}
/* load the CLR header from its section; va/size come from the
 * COM descriptor data directory; returns 1 only if the header was read
 * and its runtime version is recent enough */
static int load_clr_header( IMAGE_COR20_HEADER *hdr, size_t va, size_t size, int unix_fd,
                            IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    ssize_t ret;
    size_t map_size, file_size;
    off_t file_start;
    unsigned int i;

    if (!va || !size) return 0;

    /* find the section whose mapped range fully contains [va, va+size) */
    for (i = 0; i < nb_sec; i++)
    {
        if (va < sec[i].VirtualAddress) continue;
        if (sec[i].Misc.VirtualSize && va - sec[i].VirtualAddress >= sec[i].Misc.VirtualSize) continue;
        get_section_sizes( &sec[i], &map_size, &file_start, &file_size );
        if (size >= map_size) continue;
        if (va - sec[i].VirtualAddress >= map_size - size) continue;
        file_size = min( file_size, map_size );
        size = min( size, sizeof(*hdr) );  /* never read more than the header */
        ret = pread( unix_fd, hdr, min( size, file_size ), file_start + va - sec[i].VirtualAddress );
        if (ret <= 0) break;
        /* zero-fill whatever was not present on disk */
        if (ret < sizeof(*hdr)) memset( (char *)hdr + ret, 0, sizeof(*hdr) - ret );
        /* accept only runtimes at or above the V2 major/minor baseline */
        return (hdr->MajorRuntimeVersion > COR_VERSION_MAJOR_V2 ||
                (hdr->MajorRuntimeVersion == COR_VERSION_MAJOR_V2 &&
                 hdr->MinorRuntimeVersion >= COR_VERSION_MINOR));
    }
    return 0;
}
  562. /* retrieve the mapping parameters for an executable (PE) image */
  563. static unsigned int get_image_params( struct mapping *mapping, file_pos_t file_size, int unix_fd )
  564. {
  565. static const char builtin_signature[] = "Wine builtin DLL";
  566. static const char fakedll_signature[] = "Wine placeholder DLL";
  567. IMAGE_COR20_HEADER clr;
  568. IMAGE_SECTION_HEADER sec[96];
  569. struct
  570. {
  571. IMAGE_DOS_HEADER dos;
  572. char buffer[32];
  573. } mz;
  574. struct
  575. {
  576. DWORD Signature;
  577. IMAGE_FILE_HEADER FileHeader;
  578. union
  579. {
  580. IMAGE_OPTIONAL_HEADER32 hdr32;
  581. IMAGE_OPTIONAL_HEADER64 hdr64;
  582. } opt;
  583. } nt;
  584. off_t pos;
  585. int size;
  586. size_t mz_size, clr_va, clr_size;
  587. unsigned int i, cpu_mask = get_supported_cpu_mask();
  588. /* load the headers */
  589. if (!file_size) return STATUS_INVALID_FILE_FOR_SECTION;
  590. size = pread( unix_fd, &mz, sizeof(mz), 0 );
  591. if (size < sizeof(mz.dos)) return STATUS_INVALID_IMAGE_NOT_MZ;
  592. if (mz.dos.e_magic != IMAGE_DOS_SIGNATURE) return STATUS_INVALID_IMAGE_NOT_MZ;
  593. mz_size = size;
  594. pos = mz.dos.e_lfanew;
  595. /* zero out Optional header in the case it's not present or partial */
  596. memset( &nt, 0, sizeof(nt) );
  597. size = pread( unix_fd, &nt, sizeof(nt), pos );
  598. if (size < sizeof(nt.Signature) + sizeof(nt.FileHeader)) return STATUS_INVALID_IMAGE_PROTECT;
  599. if (nt.Signature != IMAGE_NT_SIGNATURE)
  600. {
  601. IMAGE_OS2_HEADER *os2 = (IMAGE_OS2_HEADER *)&nt;
  602. if (os2->ne_magic != IMAGE_OS2_SIGNATURE) return STATUS_INVALID_IMAGE_PROTECT;
  603. if (os2->ne_exetyp == 2) return STATUS_INVALID_IMAGE_WIN_16;
  604. if (os2->ne_exetyp == 5) return STATUS_INVALID_IMAGE_PROTECT;
  605. return STATUS_INVALID_IMAGE_NE_FORMAT;
  606. }
  607. switch (nt.opt.hdr32.Magic)
  608. {
  609. case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
  610. /* All fields up to CheckSum are mandatory regardless of SizeOfOptionalHeader value */
  611. size = max( nt.FileHeader.SizeOfOptionalHeader, offsetof(IMAGE_OPTIONAL_HEADER32, CheckSum) );
  612. if (size < sizeof(nt.opt.hdr32)) memset( (char *)&nt.opt.hdr32 + size, 0, sizeof(nt.opt.hdr32) - size );
  613. switch (nt.FileHeader.Machine)
  614. {
  615. case IMAGE_FILE_MACHINE_I386:
  616. mapping->image.cpu = CPU_x86;
  617. if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
  618. return STATUS_INVALID_IMAGE_FORMAT;
  619. case IMAGE_FILE_MACHINE_ARM:
  620. case IMAGE_FILE_MACHINE_THUMB:
  621. case IMAGE_FILE_MACHINE_ARMNT:
  622. mapping->image.cpu = CPU_ARM;
  623. if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
  624. return STATUS_INVALID_IMAGE_FORMAT;
  625. case IMAGE_FILE_MACHINE_POWERPC:
  626. mapping->image.cpu = CPU_POWERPC;
  627. if (cpu_mask & CPU_FLAG(CPU_POWERPC)) break;
  628. return STATUS_INVALID_IMAGE_FORMAT;
  629. default:
  630. return STATUS_INVALID_IMAGE_FORMAT;
  631. }
  632. clr_va = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
  633. clr_size = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;
  634. mapping->image.base = nt.opt.hdr32.ImageBase;
  635. mapping->image.entry_point = nt.opt.hdr32.ImageBase + nt.opt.hdr32.AddressOfEntryPoint;
  636. mapping->image.map_size = ROUND_SIZE( nt.opt.hdr32.SizeOfImage );
  637. mapping->image.stack_size = nt.opt.hdr32.SizeOfStackReserve;
  638. mapping->image.stack_commit = nt.opt.hdr32.SizeOfStackCommit;
  639. mapping->image.subsystem = nt.opt.hdr32.Subsystem;
  640. mapping->image.subsystem_low = nt.opt.hdr32.MinorSubsystemVersion;
  641. mapping->image.subsystem_high = nt.opt.hdr32.MajorSubsystemVersion;
  642. mapping->image.dll_charact = nt.opt.hdr32.DllCharacteristics;
  643. mapping->image.contains_code = (nt.opt.hdr32.SizeOfCode ||
  644. nt.opt.hdr32.AddressOfEntryPoint ||
  645. nt.opt.hdr32.SectionAlignment & page_mask);
  646. mapping->image.header_size = nt.opt.hdr32.SizeOfHeaders;
  647. mapping->image.checksum = nt.opt.hdr32.CheckSum;
  648. mapping->image.image_flags = 0;
  649. if (nt.opt.hdr32.SectionAlignment & page_mask)
  650. mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
  651. if ((nt.opt.hdr32.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
  652. mapping->image.contains_code && !(clr_va && clr_size))
  653. mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
  654. break;
  655. case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
  656. /* All fields up to CheckSum are mandatory regardless of SizeOfOptionalHeader value */
  657. size = max( nt.FileHeader.SizeOfOptionalHeader, offsetof(IMAGE_OPTIONAL_HEADER64, CheckSum) );
  658. if (size < sizeof(nt.opt.hdr64)) memset( (char *)&nt.opt.hdr64 + size, 0, sizeof(nt.opt.hdr64) - size );
  659. if (!(cpu_mask & CPU_64BIT_MASK)) return STATUS_INVALID_IMAGE_WIN_64;
  660. switch (nt.FileHeader.Machine)
  661. {
  662. case IMAGE_FILE_MACHINE_AMD64:
  663. mapping->image.cpu = CPU_x86_64;
  664. if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
  665. return STATUS_INVALID_IMAGE_FORMAT;
  666. case IMAGE_FILE_MACHINE_ARM64:
  667. mapping->image.cpu = CPU_ARM64;
  668. if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
  669. return STATUS_INVALID_IMAGE_FORMAT;
  670. default:
  671. return STATUS_INVALID_IMAGE_FORMAT;
  672. }
  673. clr_va = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
  674. clr_size = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;
  675. mapping->image.base = nt.opt.hdr64.ImageBase;
  676. mapping->image.entry_point = nt.opt.hdr64.ImageBase + nt.opt.hdr64.AddressOfEntryPoint;
  677. mapping->image.map_size = ROUND_SIZE( nt.opt.hdr64.SizeOfImage );
  678. mapping->image.stack_size = nt.opt.hdr64.SizeOfStackReserve;
  679. mapping->image.stack_commit = nt.opt.hdr64.SizeOfStackCommit;
  680. mapping->image.subsystem = nt.opt.hdr64.Subsystem;
  681. mapping->image.subsystem_low = nt.opt.hdr64.MinorSubsystemVersion;
  682. mapping->image.subsystem_high = nt.opt.hdr64.MajorSubsystemVersion;
  683. mapping->image.dll_charact = nt.opt.hdr64.DllCharacteristics;
  684. mapping->image.contains_code = (nt.opt.hdr64.SizeOfCode ||
  685. nt.opt.hdr64.AddressOfEntryPoint ||
  686. nt.opt.hdr64.SectionAlignment & page_mask);
  687. mapping->image.header_size = nt.opt.hdr64.SizeOfHeaders;
  688. mapping->image.checksum = nt.opt.hdr64.CheckSum;
  689. mapping->image.image_flags = 0;
  690. if (nt.opt.hdr64.SectionAlignment & page_mask)
  691. mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
  692. if ((nt.opt.hdr64.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
  693. mapping->image.contains_code && !(clr_va && clr_size))
  694. mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
  695. break;
  696. default:
  697. return STATUS_INVALID_IMAGE_FORMAT;
  698. }
  699. mapping->image.image_charact = nt.FileHeader.Characteristics;
  700. mapping->image.machine = nt.FileHeader.Machine;
  701. mapping->image.zerobits = 0; /* FIXME */
  702. mapping->image.gp = 0; /* FIXME */
  703. mapping->image.file_size = file_size;
  704. mapping->image.loader_flags = clr_va && clr_size;
  705. mapping->image.__pad = 0;
  706. if (mz_size == sizeof(mz) && !memcmp( mz.buffer, builtin_signature, sizeof(builtin_signature) ))
  707. mapping->image.image_flags |= IMAGE_FLAGS_WineBuiltin;
  708. else if (mz_size == sizeof(mz) && !memcmp( mz.buffer, fakedll_signature, sizeof(fakedll_signature) ))
  709. mapping->image.image_flags |= IMAGE_FLAGS_WineFakeDll;
  710. /* load the section headers */
  711. pos += sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader;
  712. if (nt.FileHeader.NumberOfSections > ARRAY_SIZE( sec )) return STATUS_INVALID_IMAGE_FORMAT;
  713. size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
  714. if (!mapping->size) mapping->size = mapping->image.map_size;
  715. else if (mapping->size > mapping->image.map_size) return STATUS_SECTION_TOO_BIG;
  716. if (pos + size > mapping->image.map_size) return STATUS_INVALID_FILE_FOR_SECTION;
  717. if (pos + size > mapping->image.header_size) mapping->image.header_size = pos + size;
  718. if (pread( unix_fd, sec, size, pos ) != size) return STATUS_INVALID_FILE_FOR_SECTION;
  719. for (i = 0; i < nt.FileHeader.NumberOfSections && !mapping->image.contains_code; i++)
  720. if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) mapping->image.contains_code = 1;
  721. if (load_clr_header( &clr, clr_va, clr_size, unix_fd, sec, nt.FileHeader.NumberOfSections ) &&
  722. (clr.Flags & COMIMAGE_FLAGS_ILONLY))
  723. {
  724. mapping->image.image_flags |= IMAGE_FLAGS_ComPlusILOnly;
  725. if (nt.opt.hdr32.Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC &&
  726. !(clr.Flags & COMIMAGE_FLAGS_32BITREQUIRED))
  727. {
  728. mapping->image.image_flags |= IMAGE_FLAGS_ComPlusNativeReady;
  729. if (cpu_mask & CPU_FLAG(CPU_x86_64)) mapping->image.cpu = CPU_x86_64;
  730. else if (cpu_mask & CPU_FLAG(CPU_ARM64)) mapping->image.cpu = CPU_ARM64;
  731. }
  732. }
  733. if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections ))
  734. return STATUS_INVALID_FILE_FOR_SECTION;
  735. return STATUS_SUCCESS;
  736. }
  737. static struct ranges *create_ranges(void)
  738. {
  739. struct ranges *ranges = alloc_object( &ranges_ops );
  740. if (!ranges) return NULL;
  741. ranges->count = 0;
  742. ranges->max = 8;
  743. if (!(ranges->ranges = mem_alloc( ranges->max * sizeof(*ranges->ranges) )))
  744. {
  745. release_object( ranges );
  746. return NULL;
  747. }
  748. return ranges;
  749. }
  750. static unsigned int get_mapping_flags( obj_handle_t handle, unsigned int flags )
  751. {
  752. switch (flags & (SEC_IMAGE | SEC_RESERVE | SEC_COMMIT | SEC_FILE))
  753. {
  754. case SEC_IMAGE:
  755. if (flags & (SEC_WRITECOMBINE | SEC_LARGE_PAGES)) break;
  756. if (handle) return SEC_FILE | SEC_IMAGE;
  757. set_error( STATUS_INVALID_FILE_FOR_SECTION );
  758. return 0;
  759. case SEC_COMMIT:
  760. if (!handle) return flags;
  761. /* fall through */
  762. case SEC_RESERVE:
  763. if (flags & SEC_LARGE_PAGES) break;
  764. if (handle) return SEC_FILE | (flags & (SEC_NOCACHE | SEC_WRITECOMBINE));
  765. return flags;
  766. }
  767. set_error( STATUS_INVALID_PARAMETER );
  768. return 0;
  769. }
/* create a mapping object, either backed by a file handle or anonymous;
 * returns a new reference (or an existing one if the name already exists),
 * or NULL with the error set */
static struct object *create_mapping( struct object *root, const struct unicode_str *name,
                                      unsigned int attr, mem_size_t size, unsigned int flags,
                                      obj_handle_t handle, unsigned int file_access,
                                      const struct security_descriptor *sd )
{
    struct mapping *mapping;
    struct file *file;
    struct fd *fd;
    int unix_fd;
    struct stat st;

    /* lazily initialize the cached page mask */
    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd )))
        return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS)
        return &mapping->obj;  /* Nothing else to do */

    mapping->size      = size;
    mapping->fd        = NULL;
    mapping->shared    = NULL;
    mapping->committed = NULL;

    /* validate the flag combination; 0 means invalid and the error is already set */
    if (!(mapping->flags = get_mapping_flags( handle, flags ))) goto error;

    if (handle)
    {
        const unsigned int sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
        unsigned int mapping_access = FILE_MAPPING_ACCESS;

        if (!(file = get_file_obj( current->process, handle, file_access ))) goto error;
        fd = get_obj_fd( (struct object *)file );

        /* file sharing rules for mappings are different so we use magic access rights */
        if (flags & SEC_IMAGE) mapping_access |= FILE_MAPPING_IMAGE;
        else if (file_access & FILE_WRITE_DATA) mapping_access |= FILE_MAPPING_WRITE;

        /* reuse an existing fd object for this mapping if possible, otherwise dup one */
        if (!(mapping->fd = get_fd_object_for_mapping( fd, mapping_access, sharing )))
        {
            mapping->fd = dup_fd_object( fd, mapping_access, sharing, FILE_SYNCHRONOUS_IO_NONALERT );
            if (mapping->fd) set_fd_user( mapping->fd, &mapping_fd_ops, NULL );
        }
        release_object( file );
        release_object( fd );
        if (!mapping->fd) goto error;

        if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
        if (fstat( unix_fd, &st ) == -1)
        {
            file_set_error();
            goto error;
        }
        if (flags & SEC_IMAGE)
        {
            /* parse the PE headers; on success the image is fully described */
            unsigned int err = get_image_params( mapping, st.st_size, unix_fd );
            if (!err) return &mapping->obj;
            set_error( err );
            goto error;
        }
        if (!mapping->size)
        {
            /* no explicit size: use the file size, which must be non-zero */
            if (!(mapping->size = st.st_size))
            {
                set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
                goto error;
            }
        }
        else if (st.st_size < mapping->size)
        {
            /* growing the file requires write access */
            if (!(file_access & FILE_WRITE_DATA))
            {
                set_error( STATUS_SECTION_TOO_BIG );
                goto error;
            }
            if (!grow_file( unix_fd, mapping->size )) goto error;
        }
    }
    else  /* Anonymous mapping (no associated file) */
    {
        if (!mapping->size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        /* SEC_RESERVE sections track committed pages explicitly */
        if ((flags & SEC_RESERVE) && !(mapping->committed = create_ranges())) goto error;
        /* round the size up to a whole number of pages */
        mapping->size = (mapping->size + page_mask) & ~((mem_size_t)page_mask);
        if ((unix_fd = create_temp_file( mapping->size )) == -1) goto error;
        if (!(mapping->fd = create_anonymous_fd( &mapping_fd_ops, unix_fd, &mapping->obj,
                                                 FILE_SYNCHRONOUS_IO_NONALERT ))) goto error;
        allow_fd_caching( mapping->fd );
    }
    return &mapping->obj;

 error:
    release_object( mapping );
    return NULL;
}
  857. struct mapping *get_mapping_obj( struct process *process, obj_handle_t handle, unsigned int access )
  858. {
  859. return (struct mapping *)get_handle_obj( process, handle, access, &mapping_ops );
  860. }
  861. /* open a new file for the file descriptor backing the mapping */
  862. struct file *get_mapping_file( struct process *process, client_ptr_t base,
  863. unsigned int access, unsigned int sharing )
  864. {
  865. struct memory_view *view = find_mapped_view( process, base );
  866. if (!view || !view->fd) return NULL;
  867. return create_file_for_fd_obj( view->fd, access, sharing );
  868. }
  869. static void mapping_dump( struct object *obj, int verbose )
  870. {
  871. struct mapping *mapping = (struct mapping *)obj;
  872. assert( obj->ops == &mapping_ops );
  873. fprintf( stderr, "Mapping size=%08x%08x flags=%08x fd=%p shared=%p\n",
  874. (unsigned int)(mapping->size >> 32), (unsigned int)mapping->size,
  875. mapping->flags, mapping->fd, mapping->shared );
  876. }
  877. static struct object_type *mapping_get_type( struct object *obj )
  878. {
  879. static const struct unicode_str str = { type_Section, sizeof(type_Section) };
  880. return get_object_type( &str );
  881. }
  882. static struct fd *mapping_get_fd( struct object *obj )
  883. {
  884. struct mapping *mapping = (struct mapping *)obj;
  885. return (struct fd *)grab_object( mapping->fd );
  886. }
  887. static unsigned int mapping_map_access( struct object *obj, unsigned int access )
  888. {
  889. if (access & GENERIC_READ) access |= STANDARD_RIGHTS_READ | SECTION_QUERY | SECTION_MAP_READ;
  890. if (access & GENERIC_WRITE) access |= STANDARD_RIGHTS_WRITE | SECTION_MAP_WRITE;
  891. if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SECTION_MAP_EXECUTE;
  892. if (access & GENERIC_ALL) access |= SECTION_ALL_ACCESS;
  893. return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL);
  894. }
  895. static void mapping_destroy( struct object *obj )
  896. {
  897. struct mapping *mapping = (struct mapping *)obj;
  898. assert( obj->ops == &mapping_ops );
  899. if (mapping->fd) release_object( mapping->fd );
  900. if (mapping->committed) release_object( mapping->committed );
  901. if (mapping->shared) release_object( mapping->shared );
  902. }
  903. static enum server_fd_type mapping_get_fd_type( struct fd *fd )
  904. {
  905. return FD_TYPE_FILE;
  906. }
  907. int get_page_size(void)
  908. {
  909. if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;
  910. return page_mask + 1;
  911. }
  912. /* create a file mapping */
  913. DECL_HANDLER(create_mapping)
  914. {
  915. struct object *root, *obj;
  916. struct unicode_str name;
  917. const struct security_descriptor *sd;
  918. const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root );
  919. if (!objattr) return;
  920. if ((obj = create_mapping( root, &name, objattr->attributes, req->size, req->flags,
  921. req->file_handle, req->file_access, sd )))
  922. {
  923. if (get_error() == STATUS_OBJECT_NAME_EXISTS)
  924. reply->handle = alloc_handle( current->process, obj, req->access, objattr->attributes );
  925. else
  926. reply->handle = alloc_handle_no_access_check( current->process, obj,
  927. req->access, objattr->attributes );
  928. release_object( obj );
  929. }
  930. if (root) release_object( root );
  931. }
  932. /* open a handle to a mapping */
  933. DECL_HANDLER(open_mapping)
  934. {
  935. struct unicode_str name = get_req_unicode_str();
  936. reply->handle = open_object( current->process, req->rootdir, req->access,
  937. &mapping_ops, &name, req->attributes );
  938. }
  939. /* get a mapping information */
  940. DECL_HANDLER(get_mapping_info)
  941. {
  942. struct mapping *mapping;
  943. if (!(mapping = get_mapping_obj( current->process, req->handle, req->access ))) return;
  944. reply->size = mapping->size;
  945. reply->flags = mapping->flags;
  946. if (mapping->flags & SEC_IMAGE)
  947. set_reply_data( &mapping->image, min( sizeof(mapping->image), get_reply_max_size() ));
  948. if (!(req->access & (SECTION_MAP_READ | SECTION_MAP_WRITE))) /* query only */
  949. {
  950. release_object( mapping );
  951. return;
  952. }
  953. if (mapping->shared)
  954. reply->shared_file = alloc_handle( current->process, mapping->shared->file,
  955. GENERIC_READ|GENERIC_WRITE, 0 );
  956. release_object( mapping );
  957. }
/* add a memory view in the current process */
DECL_HANDLER(map_view)
{
    struct mapping *mapping = NULL;
    struct memory_view *view;

    /* reject zero size, an unaligned base, or an address range that wraps */
    if (!req->size || (req->base & page_mask) || req->base + req->size < req->base) /* overflow */
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    /* make sure we don't already have an overlapping view */
    LIST_FOR_EACH_ENTRY( view, &current->process->views, struct memory_view, entry )
    {
        if (view->base + view->size <= req->base) continue;
        if (view->base >= req->base + req->size) continue;
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!(mapping = get_mapping_obj( current->process, req->mapping, req->access ))) return;

    if (mapping->flags & SEC_IMAGE)
    {
        /* image views must start at offset 0 and fit inside the image */
        if (req->start || req->size > mapping->image.map_size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto done;
        }
    }
    else if (req->start >= mapping->size ||
             req->start + req->size < req->start ||  /* overflow */
             req->start + req->size > ((mapping->size + page_mask) & ~(mem_size_t)page_mask))
    {
        /* data views: the requested range must lie within the page-rounded section size */
        set_error( STATUS_INVALID_PARAMETER );
        goto done;
    }

    if ((view = mem_alloc( sizeof(*view) )))
    {
        view->base  = req->base;
        view->size  = req->size;
        view->start = req->start;
        view->flags = mapping->flags;
        /* don't keep a reference to the fd when it is removable */
        view->fd = !is_fd_removable( mapping->fd ) ? (struct fd *)grab_object( mapping->fd ) : NULL;
        view->committed = mapping->committed ? (struct ranges *)grab_object( mapping->committed ) : NULL;
        view->shared = mapping->shared ? (struct shared_map *)grab_object( mapping->shared ) : NULL;
        list_add_tail( &current->process->views, &view->entry );
    }

done:
    release_object( mapping );
}
  1006. /* unmap a memory view from the current process */
  1007. DECL_HANDLER(unmap_view)
  1008. {
  1009. struct memory_view *view = find_mapped_view( current->process, req->base );
  1010. if (view) free_memory_view( view );
  1011. }
/* get file handle from mapping by address */
DECL_HANDLER(get_mapping_file)
{
    struct memory_view *view;
    struct process *process;
    struct file *file;

    if (!(process = get_process_from_handle( req->process, 0 ))) return;

    /* find the view of the target process that contains the given address */
    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
        if (req->addr >= view->base && req->addr < view->base + view->size) break;

    /* when the loop runs off the end, the iterator points back at the list head */
    if (&view->entry == &process->views)
    {
        set_error( STATUS_NOT_MAPPED_VIEW );
        release_object( process );
        return;
    }
    if (view->fd && (file = create_file_for_fd_obj( view->fd, GENERIC_READ,
                                                    FILE_SHARE_READ | FILE_SHARE_WRITE )))
    {
        /* the handle is allocated in the calling process, not in the target process */
        reply->handle = alloc_handle( current->process, file, GENERIC_READ, 0 );
        release_object( file );
    }
    release_object( process );
}
  1035. /* get a range of committed pages in a file mapping */
  1036. DECL_HANDLER(get_mapping_committed_range)
  1037. {
  1038. struct memory_view *view = find_mapped_view( current->process, req->base );
  1039. if (view) reply->committed = find_committed_range( view, req->offset, &reply->size );
  1040. }
  1041. /* add a range to the committed pages in a file mapping */
  1042. DECL_HANDLER(add_mapping_committed_range)
  1043. {
  1044. struct memory_view *view = find_mapped_view( current->process, req->base );
  1045. if (view) add_committed_range( view, req->offset, req->offset + req->size );
  1046. }
  1047. /* check if two memory maps are for the same file */
  1048. DECL_HANDLER(is_same_mapping)
  1049. {
  1050. struct memory_view *view1 = find_mapped_view( current->process, req->base1 );
  1051. struct memory_view *view2 = find_mapped_view( current->process, req->base2 );
  1052. if (!view1 || !view2) return;
  1053. if (!view1->fd || !view2->fd ||
  1054. !(view1->flags & SEC_IMAGE) || !(view2->flags & SEC_IMAGE) ||
  1055. !is_same_file_fd( view1->fd, view2->fd ))
  1056. set_error( STATUS_NOT_SAME_DEVICE );
  1057. }