change.c
/*
 * Server-side change notification management
 *
 * Copyright (C) 1998 Alexandre Julliard
 * Copyright (C) 2006 Mike McCormack
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <limits.h>
#include <dirent.h>
#include <errno.h>
#ifdef HAVE_POLL_H
# include <poll.h>
#endif
#ifdef HAVE_SYS_INOTIFY_H
#include <sys/inotify.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"

#include "file.h"
#include "handle.h"
#include "thread.h"
#include "request.h"
#include "process.h"
#include "security.h"
#include "winternl.h"

/* dnotify support */

#ifdef linux
#ifndef F_NOTIFY
#define F_NOTIFY 1026
#define DN_ACCESS       0x00000001      /* File accessed */
#define DN_MODIFY       0x00000002      /* File modified */
#define DN_CREATE       0x00000004      /* File created */
#define DN_DELETE       0x00000008      /* File removed */
#define DN_RENAME       0x00000010      /* File renamed */
#define DN_ATTRIB       0x00000020      /* File changed attributes */
#define DN_MULTISHOT    0x80000000      /* Don't remove notifier */
#endif
#endif

/* inotify support */

struct inode;

static void free_inode( struct inode *inode );

static struct fd *inotify_fd;

struct change_record {
    struct list entry;
    unsigned int cookie;
    struct filesystem_event event;
};

struct dir
{
    struct object   obj;             /* object header */
    struct fd      *fd;              /* file descriptor to the directory */
    mode_t          mode;            /* file stat.st_mode */
    uid_t           uid;             /* file stat.st_uid */
    struct list     entry;           /* entry in global change notifications list */
    unsigned int    filter;          /* notification filter */
    int             notified;        /* SIGIO counter */
    int             want_data;       /* return change data */
    int             subtree;         /* do we want to watch subdirectories? */
    struct list     change_records;  /* data for the change */
    struct list     in_entry;        /* entry in the inode dirs list */
    struct inode   *inode;           /* inode of the associated directory */
    struct process *client_process;  /* client process that has a cache for this directory */
    int             client_entry;    /* entry in client process cache */
};

static struct fd *dir_get_fd( struct object *obj );
static struct security_descriptor *dir_get_sd( struct object *obj );
static int dir_set_sd( struct object *obj, const struct security_descriptor *sd,
                       unsigned int set_info );
static void dir_dump( struct object *obj, int verbose );
static struct object_type *dir_get_type( struct object *obj );
static int dir_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void dir_destroy( struct object *obj );

static const struct object_ops dir_ops =
{
    sizeof(struct dir),       /* size */
    dir_dump,                 /* dump */
    dir_get_type,             /* get_type */
    add_queue,                /* add_queue */
    remove_queue,             /* remove_queue */
    default_fd_signaled,      /* signaled */
    default_fd_get_esync_fd,  /* get_esync_fd */
    no_satisfied,             /* satisfied */
    no_signal,                /* signal */
    dir_get_fd,               /* get_fd */
    default_fd_map_access,    /* map_access */
    dir_get_sd,               /* get_sd */
    dir_set_sd,               /* set_sd */
    no_lookup_name,           /* lookup_name */
    no_link_name,             /* link_name */
    NULL,                     /* unlink_name */
    no_open_file,             /* open_file */
    no_kernel_obj_list,       /* get_kernel_obj_list */
    no_alloc_handle,          /* alloc_handle */
    dir_close_handle,         /* close_handle */
    dir_destroy               /* destroy */
};

static int dir_get_poll_events( struct fd *fd );
static enum server_fd_type dir_get_fd_type( struct fd *fd );

static const struct fd_ops dir_fd_ops =
{
    dir_get_poll_events,         /* get_poll_events */
    default_poll_event,          /* poll_event */
    dir_get_fd_type,             /* get_fd_type */
    no_fd_read,                  /* read */
    no_fd_write,                 /* write */
    no_fd_flush,                 /* flush */
    default_fd_get_file_info,    /* get_file_info */
    no_fd_get_volume_info,       /* get_volume_info */
    default_fd_ioctl,            /* ioctl */
    default_fd_queue_async,      /* queue_async */
    default_fd_reselect_async    /* reselect_async */
};

static struct list change_list = LIST_INIT(change_list);

/* per-process structure to keep track of cache entries on the client side */
struct dir_cache
{
    unsigned int  size;
    unsigned int  count;
    unsigned char state[1];
};

enum dir_cache_state
{
    DIR_CACHE_STATE_FREE,
    DIR_CACHE_STATE_INUSE,
    DIR_CACHE_STATE_RELEASED
};

/* return an array of cache entries that can be freed on the client side */
static int *get_free_dir_cache_entries( struct process *process, data_size_t *size )
{
    int *ret;
    struct dir_cache *cache = process->dir_cache;
    unsigned int i, j, count;

    if (!cache) return NULL;
    for (i = count = 0; i < cache->count && count < *size / sizeof(*ret); i++)
        if (cache->state[i] == DIR_CACHE_STATE_RELEASED) count++;
    if (!count) return NULL;

    if ((ret = malloc( count * sizeof(*ret) )))
    {
        for (i = j = 0; j < count; i++)
        {
            if (cache->state[i] != DIR_CACHE_STATE_RELEASED) continue;
            cache->state[i] = DIR_CACHE_STATE_FREE;
            ret[j++] = i;
        }
        *size = count * sizeof(*ret);
    }
    return ret;
}

/* allocate a new client-side directory cache entry */
static int alloc_dir_cache_entry( struct dir *dir, struct process *process )
{
    unsigned int i = 0;
    struct dir_cache *cache = process->dir_cache;

    if (cache)
        for (i = 0; i < cache->count; i++)
            if (cache->state[i] == DIR_CACHE_STATE_FREE) goto found;

    if (!cache || cache->count == cache->size)
    {
        unsigned int size = cache ? cache->size * 2 : 256;
        if (!(cache = realloc( cache, offsetof( struct dir_cache, state[size] ))))
        {
            set_error( STATUS_NO_MEMORY );
            return -1;
        }
        process->dir_cache = cache;
        cache->size = size;
    }
    cache->count = i + 1;
found:
    cache->state[i] = DIR_CACHE_STATE_INUSE;
    return i;
}

/* release a directory cache entry; it will be freed on the client side on the next cache request */
static void release_dir_cache_entry( struct dir *dir )
{
    struct dir_cache *cache;

    if (!dir->client_process) return;
    cache = dir->client_process->dir_cache;
    cache->state[dir->client_entry] = DIR_CACHE_STATE_RELEASED;
    release_object( dir->client_process );
    dir->client_process = NULL;
}
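
/* map the Win32 notification filter onto dnotify flags and register for SIGIO on the directory fd */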
static void dnotify_adjust_changes( struct dir *dir )
{
#if defined(F_SETSIG) && defined(F_NOTIFY)
    int fd = get_unix_fd( dir->fd );
    unsigned int filter = dir->filter;
    unsigned int val;
    if ( 0 > fcntl( fd, F_SETSIG, SIGIO) )
        return;

    val = DN_MULTISHOT;
    if (filter & FILE_NOTIFY_CHANGE_FILE_NAME)
        val |= DN_RENAME | DN_DELETE | DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_DIR_NAME)
        val |= DN_RENAME | DN_DELETE | DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_ATTRIBUTES)
        val |= DN_ATTRIB;
    if (filter & FILE_NOTIFY_CHANGE_SIZE)
        val |= DN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_WRITE)
        val |= DN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_ACCESS)
        val |= DN_ACCESS;
    if (filter & FILE_NOTIFY_CHANGE_CREATION)
        val |= DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_SECURITY)
        val |= DN_ATTRIB;
    fcntl( fd, F_NOTIFY, val );
#endif
}

/* insert change in the global list */
static inline void insert_change( struct dir *dir )
{
    sigset_t sigset;

    sigemptyset( &sigset );
    sigaddset( &sigset, SIGIO );
    sigprocmask( SIG_BLOCK, &sigset, NULL );
    list_add_head( &change_list, &dir->entry );
    sigprocmask( SIG_UNBLOCK, &sigset, NULL );
}

/* remove change from the global list */
static inline void remove_change( struct dir *dir )
{
    sigset_t sigset;

    sigemptyset( &sigset );
    sigaddset( &sigset, SIGIO );
    sigprocmask( SIG_BLOCK, &sigset, NULL );
    list_remove( &dir->entry );
    sigprocmask( SIG_UNBLOCK, &sigset, NULL );
}

static void dir_dump( struct object *obj, int verbose )
{
    struct dir *dir = (struct dir *)obj;
    assert( obj->ops == &dir_ops );
    fprintf( stderr, "Dirfile fd=%p filter=%08x\n", dir->fd, dir->filter );
}

static struct object_type *dir_get_type( struct object *obj )
{
    static const WCHAR name[] = {'F','i','l','e'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}

/* enter here directly from SIGIO signal handler */
void do_change_notify( int unix_fd )
{
    struct dir *dir;

    /* FIXME: this is O(n) ... probably can be improved */
    LIST_FOR_EACH_ENTRY( dir, &change_list, struct dir, entry )
    {
        if (get_unix_fd( dir->fd ) != unix_fd) continue;
        interlocked_xchg_add( &dir->notified, 1 );
        break;
    }
}

/* SIGIO callback, called synchronously with the poll loop */
void sigio_callback(void)
{
    struct dir *dir;

    LIST_FOR_EACH_ENTRY( dir, &change_list, struct dir, entry )
    {
        if (interlocked_xchg( &dir->notified, 0 ))
            fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );
    }
}

static struct fd *dir_get_fd( struct object *obj )
{
    struct dir *dir = (struct dir *)obj;
    assert( obj->ops == &dir_ops );
    return (struct fd *)grab_object( dir->fd );
}

static struct security_descriptor *dir_get_sd( struct object *obj )
{
    struct dir *dir = (struct dir *)obj;
    struct security_descriptor *sd;
    struct fd *fd;

    assert( obj->ops == &dir_ops );

    fd = dir_get_fd( obj );
    sd = get_file_sd( obj, fd, &dir->mode, &dir->uid );
    release_object( fd );
    return sd;
}

static int dir_set_sd( struct object *obj, const struct security_descriptor *sd,
                       unsigned int set_info )
{
    struct dir *dir = (struct dir *)obj;
    struct fd *fd;
    int ret;

    assert( obj->ops == &dir_ops );

    fd = dir_get_fd( obj );
    ret = set_file_sd( obj, fd, &dir->mode, &dir->uid, sd, set_info );
    release_object( fd );
    return ret;
}

static struct change_record *get_first_change_record( struct dir *dir )
{
    struct list *ptr = list_head( &dir->change_records );
    if (!ptr) return NULL;
    list_remove( ptr );
    return LIST_ENTRY( ptr, struct change_record, entry );
}

static int dir_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct dir *dir = (struct dir *)obj;

    if (!fd_close_handle( obj, process, handle )) return 0;
    if (obj->handle_count == 1) release_dir_cache_entry( dir );  /* closing last handle, release cache */
    return 1;  /* ok to close */
}

static void dir_destroy( struct object *obj )
{
    struct change_record *record;
    struct dir *dir = (struct dir *)obj;
    assert (obj->ops == &dir_ops);

    if (dir->filter)
        remove_change( dir );

    if (dir->inode)
    {
        list_remove( &dir->in_entry );
        free_inode( dir->inode );
    }

    while ((record = get_first_change_record( dir ))) free( record );

    release_dir_cache_entry( dir );
    release_object( dir->fd );

    if (inotify_fd && list_empty( &change_list ))
    {
        release_object( inotify_fd );
        inotify_fd = NULL;
    }
}

struct dir *get_dir_obj( struct process *process, obj_handle_t handle, unsigned int access )
{
    return (struct dir *)get_handle_obj( process, handle, access, &dir_ops );
}

static int dir_get_poll_events( struct fd *fd )
{
    return 0;
}

static enum server_fd_type dir_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DIR;
}

#ifdef HAVE_SYS_INOTIFY_H

#define HASH_SIZE 31

struct inode {
    struct list ch_entry;    /* entry in the children list */
    struct list children;    /* children of this inode */
    struct inode *parent;    /* parent of this inode */
    struct list dirs;        /* directory handles watching this inode */
    struct list ino_entry;   /* entry in the inode hash */
    struct list wd_entry;    /* entry in the watch descriptor hash */
    dev_t dev;               /* device number */
    ino_t ino;               /* device's inode number */
    int wd;                  /* inotify's watch descriptor */
    char *name;              /* basename name of the inode */
};

static struct list inode_hash[ HASH_SIZE ];
static struct list wd_hash[ HASH_SIZE ];

static int inotify_add_dir( char *path, unsigned int filter );

static struct inode *inode_from_wd( int wd )
{
    struct list *bucket = &wd_hash[ wd % HASH_SIZE ];
    struct inode *inode;

    LIST_FOR_EACH_ENTRY( inode, bucket, struct inode, wd_entry )
        if (inode->wd == wd)
            return inode;

    return NULL;
}

static inline struct list *get_hash_list( dev_t dev, ino_t ino )
{
    return &inode_hash[ (ino ^ dev) % HASH_SIZE ];
}

static struct inode *find_inode( dev_t dev, ino_t ino )
{
    struct list *bucket = get_hash_list( dev, ino );
    struct inode *inode;

    LIST_FOR_EACH_ENTRY( inode, bucket, struct inode, ino_entry )
        if (inode->ino == ino && inode->dev == dev)
            return inode;

    return NULL;
}

static struct inode *create_inode( dev_t dev, ino_t ino )
{
    struct inode *inode;

    inode = malloc( sizeof *inode );
    if (inode)
    {
        list_init( &inode->children );
        list_init( &inode->dirs );
        inode->ino = ino;
        inode->dev = dev;
        inode->wd = -1;
        inode->parent = NULL;
        inode->name = NULL;
        list_add_tail( get_hash_list( dev, ino ), &inode->ino_entry );
    }
    return inode;
}

static struct inode *get_inode( dev_t dev, ino_t ino )
{
    struct inode *inode;

    inode = find_inode( dev, ino );
    if (inode)
        return inode;

    return create_inode( dev, ino );
}

static void inode_set_wd( struct inode *inode, int wd )
{
    if (inode->wd != -1)
        list_remove( &inode->wd_entry );
    inode->wd = wd;
    list_add_tail( &wd_hash[ wd % HASH_SIZE ], &inode->wd_entry );
}

static void inode_set_name( struct inode *inode, const char *name )
{
    free (inode->name);
    inode->name = name ? strdup( name ) : NULL;
}

static void free_inode( struct inode *inode )
{
    int subtree = 0, watches = 0;
    struct inode *tmp, *next;
    struct dir *dir;

    LIST_FOR_EACH_ENTRY( dir, &inode->dirs, struct dir, in_entry )
    {
        subtree |= dir->subtree;
        watches++;
    }

    if (!subtree && !inode->parent)
    {
        LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &inode->children,
                                  struct inode, ch_entry )
        {
            assert( tmp != inode );
            assert( tmp->parent == inode );
            free_inode( tmp );
        }
    }

    if (watches)
        return;

    if (inode->parent)
        list_remove( &inode->ch_entry );

    /* disconnect remaining children from the parent */
    LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &inode->children, struct inode, ch_entry )
    {
        list_remove( &tmp->ch_entry );
        tmp->parent = NULL;
    }

    if (inode->wd != -1)
    {
        inotify_rm_watch( get_unix_fd( inotify_fd ), inode->wd );
        list_remove( &inode->wd_entry );
    }

    list_remove( &inode->ino_entry );
    free( inode->name );
    free( inode );
}

static struct inode *inode_add( struct inode *parent,
                                dev_t dev, ino_t ino, const char *name )
{
    struct inode *inode;

    inode = get_inode( dev, ino );
    if (!inode)
        return NULL;

    if (!inode->parent)
    {
        list_add_tail( &parent->children, &inode->ch_entry );
        inode->parent = parent;
        assert( inode != parent );
    }
    inode_set_name( inode, name );

    return inode;
}

static struct inode *inode_from_name( struct inode *inode, const char *name )
{
    struct inode *i;

    LIST_FOR_EACH_ENTRY( i, &inode->children, struct inode, ch_entry )
        if (i->name && !strcmp( i->name, name ))
            return i;
    return NULL;
}

static int inotify_get_poll_events( struct fd *fd );
static void inotify_poll_event( struct fd *fd, int event );

static const struct fd_ops inotify_fd_ops =
{
    inotify_get_poll_events,     /* get_poll_events */
    inotify_poll_event,          /* poll_event */
    NULL,                        /* get_fd_type */
    NULL,                        /* read */
    NULL,                        /* write */
    NULL,                        /* flush */
    NULL,                        /* get_file_info */
    NULL,                        /* get_volume_info */
    NULL,                        /* ioctl */
    NULL,                        /* queue_async */
    NULL                         /* reselect_async */
};

static int inotify_get_poll_events( struct fd *fd )
{
    return POLLIN;
}

static void inotify_do_change_notify( struct dir *dir, unsigned int action,
                                      unsigned int cookie, const char *relpath )
{
    struct change_record *record;

    assert( dir->obj.ops == &dir_ops );

    if (dir->want_data)
    {
        size_t len = strlen(relpath);
        record = malloc( offsetof(struct change_record, event.name[len]) );
        if (!record)
            return;

        record->cookie = cookie;
        record->event.action = action;
        memcpy( record->event.name, relpath, len );
        record->event.len = len;

        list_add_tail( &dir->change_records, &record->entry );
    }

    fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );
}
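
/* map an inotify event mask to the Win32 notification filter bits it can satisfy */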
static unsigned int filter_from_event( struct inotify_event *ie )
{
    unsigned int filter = 0;

    if (ie->mask & (IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE | IN_CREATE))
        filter |= FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME;
    if (ie->mask & IN_MODIFY)
        filter |= FILE_NOTIFY_CHANGE_SIZE | FILE_NOTIFY_CHANGE_LAST_WRITE | FILE_NOTIFY_CHANGE_LAST_ACCESS;
    if (ie->mask & IN_ATTRIB)
        filter |= FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY;
    if (ie->mask & IN_CREATE)
        filter |= FILE_NOTIFY_CHANGE_CREATION;

    if (ie->mask & IN_ISDIR)
        filter &= ~FILE_NOTIFY_CHANGE_FILE_NAME;
    else
        filter &= ~FILE_NOTIFY_CHANGE_DIR_NAME;

    return filter;
}

/* scan up the parent directories for watches */
static unsigned int filter_from_inode( struct inode *inode, int is_parent )
{
    unsigned int filter = 0;
    struct dir *dir;

    /* combine filters from parents watching subtrees */
    while (inode)
    {
        LIST_FOR_EACH_ENTRY( dir, &inode->dirs, struct dir, in_entry )
            if (dir->subtree || !is_parent)
                filter |= dir->filter;
        is_parent = 1;
        inode = inode->parent;
    }

    return filter;
}
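
/* build a unix path to the inode, rooted at /proc/self/fd of the nearest ancestor that has an
 * open directory handle; sz extra bytes are reserved at the end for the caller */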
static char *inode_get_path( struct inode *inode, int sz )
{
    struct list *head;
    char *path;
    int len;

    if (!inode)
        return NULL;

    head = list_head( &inode->dirs );
    if (head)
    {
        int unix_fd = get_unix_fd( LIST_ENTRY( head, struct dir, in_entry )->fd );
        path = malloc ( 32 + sz );
        if (path)
            sprintf( path, "/proc/self/fd/%u/", unix_fd );
        return path;
    }

    if (!inode->name)
        return NULL;

    len = strlen( inode->name );
    path = inode_get_path( inode->parent, sz + len + 1 );
    if (!path)
        return NULL;

    strcat( path, inode->name );
    strcat( path, "/" );

    return path;
}

static void inode_check_dir( struct inode *parent, const char *name )
{
    char *path;
    unsigned int filter;
    struct inode *inode;
    struct stat st;
    int wd = -1;

    path = inode_get_path( parent, strlen(name) );
    if (!path)
        return;

    strcat( path, name );

    if (stat( path, &st ) < 0)
        goto end;

    filter = filter_from_inode( parent, 1 );
    if (!filter)
        goto end;

    inode = inode_add( parent, st.st_dev, st.st_ino, name );
    if (!inode || inode->wd != -1)
        goto end;

    wd = inotify_add_dir( path, filter );
    if (wd != -1)
        inode_set_wd( inode, wd );
    else
        free_inode( inode );

end:
    free( path );
}
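
/* prepend a path segment to *path, reallocating the buffer; returns 0 on allocation failure */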
static int prepend( char **path, const char *segment )
{
    int extra;
    char *p;

    extra = strlen( segment ) + 1;
    if (*path)
    {
        int len = strlen( *path ) + 1;
        p = realloc( *path, len + extra );
        if (!p) return 0;
        memmove( &p[ extra ], p, len );
        p[ extra - 1 ] = '/';
        memcpy( p, segment, extra - 1 );
    }
    else
    {
        p = malloc( extra );
        if (!p) return 0;
        memcpy( p, segment, extra );
    }

    *path = p;

    return 1;
}

static void inotify_notify_all( struct inotify_event *ie )
{
    unsigned int filter, action;
    struct inode *inode, *i;
    char *path = NULL;
    struct dir *dir;

    inode = inode_from_wd( ie->wd );
    if (!inode)
    {
        fprintf( stderr, "no inode matches %d\n", ie->wd);
        return;
    }

    filter = filter_from_event( ie );

    if (ie->mask & IN_CREATE)
    {
        if (ie->mask & IN_ISDIR)
            inode_check_dir( inode, ie->name );

        action = FILE_ACTION_ADDED;
    }
    else if (ie->mask & IN_DELETE)
        action = FILE_ACTION_REMOVED;
    else if (ie->mask & IN_MOVED_FROM)
        action = FILE_ACTION_RENAMED_OLD_NAME;
    else if (ie->mask & IN_MOVED_TO)
        action = FILE_ACTION_RENAMED_NEW_NAME;
    else
        action = FILE_ACTION_MODIFIED;

    /*
     * Work our way up the inode hierarchy
     *  extending the relative path as we go
     *  and notifying all recursive watches.
     */
    if (!prepend( &path, ie->name ))
        return;

    for (i = inode; i; i = i->parent)
    {
        LIST_FOR_EACH_ENTRY( dir, &i->dirs, struct dir, in_entry )
            if ((filter & dir->filter) && (i==inode || dir->subtree))
                inotify_do_change_notify( dir, action, ie->cookie, path );

        if (!i->name || !prepend( &path, i->name ))
            break;
    }
    free( path );

    if (ie->mask & IN_DELETE)
    {
        i = inode_from_name( inode, ie->name );
        if (i)
            free_inode( i );
    }
}
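
/* read a buffer full of inotify events and dispatch every complete one */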
static void inotify_poll_event( struct fd *fd, int event )
{
    int r, ofs, unix_fd;
    char buffer[0x1000];
    struct inotify_event *ie;

    unix_fd = get_unix_fd( fd );
    r = read( unix_fd, buffer, sizeof buffer );
    if (r < 0)
    {
        fprintf(stderr,"inotify_poll_event(): inotify read failed!\n");
        return;
    }

    for( ofs = 0; ofs < r - offsetof(struct inotify_event, name); )
    {
        ie = (struct inotify_event*) &buffer[ofs];
        ofs += offsetof( struct inotify_event, name[ie->len] );
        if (ofs > r) break;
        if (ie->len) inotify_notify_all( ie );
    }
}

static inline struct fd *create_inotify_fd( void )
{
    int unix_fd;

    unix_fd = inotify_init();
    if (unix_fd<0)
        return NULL;
    return create_anonymous_fd( &inotify_fd_ops, unix_fd, NULL, 0 );
}
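
/* map a Win32 notification filter to the inotify event mask to watch for */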
static int map_flags( unsigned int filter )
{
    unsigned int mask;

    /* always watch these so we can track subdirectories in recursive watches */
    mask = (IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF);

    if (filter & FILE_NOTIFY_CHANGE_ATTRIBUTES)
        mask |= IN_ATTRIB;
    if (filter & FILE_NOTIFY_CHANGE_SIZE)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_WRITE)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_ACCESS)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_SECURITY)
        mask |= IN_ATTRIB;

    return mask;
}

static int inotify_add_dir( char *path, unsigned int filter )
{
    int wd = inotify_add_watch( get_unix_fd( inotify_fd ),
                                path, map_flags( filter ) );
    if (wd != -1)
        set_fd_events( inotify_fd, POLLIN );
    return wd;
}

static int init_inotify( void )
{
    int i;

    if (inotify_fd)
        return 1;

    inotify_fd = create_inotify_fd();
    if (!inotify_fd)
        return 0;

    for (i=0; i<HASH_SIZE; i++)
    {
        list_init( &inode_hash[i] );
        list_init( &wd_hash[i] );
    }

    return 1;
}

static int inotify_adjust_changes( struct dir *dir )
{
    unsigned int filter;
    struct inode *inode;
    struct stat st;
    char path[32];
    int wd, unix_fd;

    if (!inotify_fd)
        return 0;

    unix_fd = get_unix_fd( dir->fd );

    inode = dir->inode;
    if (!inode)
    {
        /* check if this fd is already being watched */
        if (-1 == fstat( unix_fd, &st ))
            return 0;

        inode = get_inode( st.st_dev, st.st_ino );
        if (!inode)
            inode = create_inode( st.st_dev, st.st_ino );
        if (!inode)
            return 0;
        list_add_tail( &inode->dirs, &dir->in_entry );
        dir->inode = inode;
    }

    filter = filter_from_inode( inode, 0 );

    sprintf( path, "/proc/self/fd/%u", unix_fd );
    wd = inotify_add_dir( path, filter );
    if (wd == -1) return 0;

    inode_set_wd( inode, wd );

    return 1;
}
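
/* read a symlink (such as /proc/self/fd/N) and return the last component of its target */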
static char *get_basename( const char *link )
{
    char *buffer, *name = NULL;
    int r, n = 0x100;

    while (1)
    {
        buffer = malloc( n );
        if (!buffer) return NULL;

        r = readlink( link, buffer, n );
        if (r < 0)
            break;

        if (r < n)
        {
            name = buffer;
            break;
        }
        free( buffer );
        n *= 2;
    }

    if (name)
    {
        while (r > 0 && name[ r - 1 ] == '/' )
            r--;
        name[ r ] = 0;

        name = strrchr( name, '/' );
        if (name)
            name = strdup( &name[1] );
    }

    free( buffer );
    return name;
}

static int dir_add_to_existing_notify( struct dir *dir )
{
    struct inode *inode, *parent;
    unsigned int filter = 0;
    struct stat st, st_new;
    char link[35], *name;
    int wd, unix_fd;

    if (!inotify_fd)
        return 0;

    unix_fd = get_unix_fd( dir->fd );

    /* check if it's in the list of inodes we want to watch */
    if (-1 == fstat( unix_fd, &st_new ))
        return 0;
    inode = find_inode( st_new.st_dev, st_new.st_ino );
    if (inode)
        return 0;

    /* lookup the parent */
    sprintf( link, "/proc/self/fd/%u/..", unix_fd );
    if (-1 == stat( link, &st ))
        return 0;

    /*
     * If there's no parent, stop.  We could keep going adding
     *  ../ to the path until we hit the root of the tree or
     *  find a recursively watched ancestor.
     * Assume it's too expensive to search up the tree for now.
     */
    parent = find_inode( st.st_dev, st.st_ino );
    if (!parent)
        return 0;

    if (parent->wd == -1)
        return 0;

    filter = filter_from_inode( parent, 1 );
    if (!filter)
        return 0;

    sprintf( link, "/proc/self/fd/%u", unix_fd );
    name = get_basename( link );
    if (!name)
        return 0;
    inode = inode_add( parent, st_new.st_dev, st_new.st_ino, name );
    free( name );
    if (!inode)
        return 0;

    /* Couldn't find this inode at the start of the function, must be new */
    assert( inode->wd == -1 );

    wd = inotify_add_dir( link, filter );
    if (wd != -1)
        inode_set_wd( inode, wd );

    return 1;
}

#else

static int init_inotify( void )
{
    return 0;
}

static int inotify_adjust_changes( struct dir *dir )
{
    return 0;
}

static void free_inode( struct inode *inode )
{
    assert( 0 );
}

static int dir_add_to_existing_notify( struct dir *dir )
{
    return 0;
}

#endif  /* HAVE_SYS_INOTIFY_H */

struct object *create_dir_obj( struct fd *fd, unsigned int access, mode_t mode,
                               const struct security_descriptor *sd )
{
    struct dir *dir;

    dir = alloc_object( &dir_ops );
    if (!dir)
        return NULL;

    list_init( &dir->change_records );
    dir->filter = 0;
    dir->notified = 0;
    dir->want_data = 0;
    dir->inode = NULL;
    grab_object( fd );
    dir->fd = fd;
    dir->mode = mode;
    dir->uid = ~(uid_t)0;
    dir->client_process = NULL;
    set_fd_user( fd, &dir_fd_ops, &dir->obj );

    if (sd) dir_set_sd( &dir->obj, sd, OWNER_SECURITY_INFORMATION |
                                       GROUP_SECURITY_INFORMATION |
                                       DACL_SECURITY_INFORMATION |
                                       SACL_SECURITY_INFORMATION );
    dir_add_to_existing_notify( dir );

    return &dir->obj;
}

/* retrieve (or allocate) the client-side directory cache entry */
DECL_HANDLER(get_directory_cache_entry)
{
    struct dir *dir;
    int *free_entries;
    data_size_t free_size;

    if (!(dir = get_dir_obj( current->process, req->handle, 0 ))) return;

    if (!dir->client_process)
    {
        if ((dir->client_entry = alloc_dir_cache_entry( dir, current->process )) == -1) goto done;
        dir->client_process = (struct process *)grab_object( current->process );
    }

    if (dir->client_process == current->process) reply->entry = dir->client_entry;
    else set_error( STATUS_SHARING_VIOLATION );

done:  /* allow freeing entries even on failure */
    free_size = get_reply_max_size();
    free_entries = get_free_dir_cache_entries( current->process, &free_size );
    if (free_entries) set_reply_data_ptr( free_entries, free_size );
    release_object( dir );
}

/* enable change notifications for a directory */
DECL_HANDLER(read_directory_changes)
{
    struct dir *dir;
    struct async *async;

    if (!req->filter)
    {
        set_error(STATUS_INVALID_PARAMETER);
        return;
    }

    dir = get_dir_obj( current->process, req->async.handle, 0 );
    if (!dir)
        return;

    /* requests don't timeout */
    if (!(async = create_async( dir->fd, current, &req->async, NULL ))) goto end;
    fd_queue_async( dir->fd, async, ASYNC_TYPE_WAIT );

    /* assign it once */
    if (!dir->filter)
    {
        init_inotify();
        insert_change( dir );
        dir->filter = req->filter;
        dir->subtree = req->subtree;
        dir->want_data = req->want_data;
    }

    /* if there's already a change in the queue, send it */
    if (!list_empty( &dir->change_records ))
        fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );

    /* setup the real notification */
    if (!inotify_adjust_changes( dir ))
        dnotify_adjust_changes( dir );

    set_error(STATUS_PENDING);
    release_object( async );

end:
    release_object( dir );
}

DECL_HANDLER(read_change)
{
    struct change_record *record, *next;
    struct dir *dir;
    struct list events;
    char *data, *event;
    int size = 0;

    dir = get_dir_obj( current->process, req->handle, 0 );
    if (!dir)
        return;

    list_init( &events );
    list_move_tail( &events, &dir->change_records );
    release_object( dir );

    if (list_empty( &events ))
    {
        set_error( STATUS_NO_DATA_DETECTED );
        return;
    }

    LIST_FOR_EACH_ENTRY( record, &events, struct change_record, entry )
    {
        size += (offsetof(struct filesystem_event, name[record->event.len])
                + sizeof(int)-1) / sizeof(int) * sizeof(int);
    }

    if (size > get_reply_max_size())
        set_error( STATUS_BUFFER_TOO_SMALL );
    else if ((data = mem_alloc( size )) != NULL)
    {
        event = data;
        LIST_FOR_EACH_ENTRY( record, &events, struct change_record, entry )
        {
            data_size_t len = offsetof( struct filesystem_event, name[record->event.len] );

            /* FIXME: rename events are sometimes reported as delete/create */
            if (record->event.action == FILE_ACTION_RENAMED_OLD_NAME)
            {
                struct list *elem = list_next( &events, &record->entry );
                if (elem)
                    next = LIST_ENTRY(elem, struct change_record, entry);

                if (elem && next->cookie == record->cookie)
                    next->cookie = 0;
                else
                    record->event.action = FILE_ACTION_REMOVED;
            }
            else if (record->event.action == FILE_ACTION_RENAMED_NEW_NAME && record->cookie)
                record->event.action = FILE_ACTION_ADDED;

            memcpy( event, &record->event, len );
            event += len;
            if (len % sizeof(int))
            {
                memset( event, 0, sizeof(int) - len % sizeof(int) );
                event += sizeof(int) - len % sizeof(int);
            }
        }
        set_reply_data_ptr( data, size );
    }

    LIST_FOR_EACH_ENTRY_SAFE( record, next, &events, struct change_record, entry )
    {
        list_remove( &record->entry );
        free( record );
    }
}