/*
 * Server-side change notification management
 *
 * Copyright (C) 1998 Alexandre Julliard
 * Copyright (C) 2006 Mike McCormack
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <limits.h>
#include <dirent.h>
#include <errno.h>
#include <unistd.h>
#include <poll.h>
#ifdef HAVE_SYS_INOTIFY_H
#include <sys/inotify.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"

#include "file.h"
#include "handle.h"
#include "thread.h"
#include "request.h"
#include "process.h"
#include "security.h"
#include "winternl.h"

/* dnotify support */

#ifdef linux
#ifndef F_NOTIFY
#define F_NOTIFY 1026
#define DN_ACCESS    0x00000001  /* File accessed */
#define DN_MODIFY    0x00000002  /* File modified */
#define DN_CREATE    0x00000004  /* File created */
#define DN_DELETE    0x00000008  /* File removed */
#define DN_RENAME    0x00000010  /* File renamed */
#define DN_ATTRIB    0x00000020  /* File changed attributes */
#define DN_MULTISHOT 0x80000000  /* Don't remove notifier */
#endif
#endif

/* inotify support */

struct inode;

static void free_inode( struct inode *inode );

static struct fd *inotify_fd;

struct change_record {
    struct list entry;
    unsigned int cookie;
    struct filesystem_event event;
};

struct dir
{
    struct object   obj;             /* object header */
    struct fd      *fd;              /* file descriptor to the directory */
    mode_t          mode;            /* file stat.st_mode */
    uid_t           uid;             /* file stat.st_uid */
    struct list     entry;           /* entry in global change notifications list */
    unsigned int    filter;          /* notification filter */
    volatile int    notified;        /* SIGIO counter */
    int             want_data;       /* return change data */
    int             subtree;         /* do we want to watch subdirectories? */
    struct list     change_records;  /* data for the change */
    struct list     in_entry;        /* entry in the inode dirs list */
    struct inode   *inode;           /* inode of the associated directory */
    struct process *client_process;  /* client process that has a cache for this directory */
    int             client_entry;    /* entry in client process cache */
};

static struct fd *dir_get_fd( struct object *obj );
static struct security_descriptor *dir_get_sd( struct object *obj );
static int dir_set_sd( struct object *obj, const struct security_descriptor *sd,
                       unsigned int set_info );
static void dir_dump( struct object *obj, int verbose );
static int dir_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void dir_destroy( struct object *obj );

static const struct object_ops dir_ops =
{
    sizeof(struct dir),       /* size */
    &file_type,               /* type */
    dir_dump,                 /* dump */
    add_queue,                /* add_queue */
    remove_queue,             /* remove_queue */
    default_fd_signaled,      /* signaled */
    no_satisfied,             /* satisfied */
    no_signal,                /* signal */
    dir_get_fd,               /* get_fd */
    default_map_access,       /* map_access */
    dir_get_sd,               /* get_sd */
    dir_set_sd,               /* set_sd */
    no_get_full_name,         /* get_full_name */
    no_lookup_name,           /* lookup_name */
    no_link_name,             /* link_name */
    NULL,                     /* unlink_name */
    no_open_file,             /* open_file */
    no_kernel_obj_list,       /* get_kernel_obj_list */
    dir_close_handle,         /* close_handle */
    dir_destroy               /* destroy */
};

static int dir_get_poll_events( struct fd *fd );
static enum server_fd_type dir_get_fd_type( struct fd *fd );

static const struct fd_ops dir_fd_ops =
{
    dir_get_poll_events,         /* get_poll_events */
    default_poll_event,          /* poll_event */
    dir_get_fd_type,             /* get_fd_type */
    no_fd_read,                  /* read */
    no_fd_write,                 /* write */
    no_fd_flush,                 /* flush */
    default_fd_get_file_info,    /* get_file_info */
    no_fd_get_volume_info,       /* get_volume_info */
    default_fd_ioctl,            /* ioctl */
    default_fd_cancel_async,     /* cancel_async */
    default_fd_queue_async,      /* queue_async */
    default_fd_reselect_async    /* reselect_async */
};

static struct list change_list = LIST_INIT(change_list);

/* per-process structure to keep track of cache entries on the client side */
struct dir_cache
{
    unsigned int  size;
    unsigned int  count;
    unsigned char state[1];
};

enum dir_cache_state
{
    DIR_CACHE_STATE_FREE,
    DIR_CACHE_STATE_INUSE,
    DIR_CACHE_STATE_RELEASED
};

/* return an array of cache entries that can be freed on the client side */
static int *get_free_dir_cache_entries( struct process *process, data_size_t *size )
{
    int *ret;
    struct dir_cache *cache = process->dir_cache;
    unsigned int i, j, count;

    if (!cache) return NULL;
    for (i = count = 0; i < cache->count && count < *size / sizeof(*ret); i++)
        if (cache->state[i] == DIR_CACHE_STATE_RELEASED) count++;
    if (!count) return NULL;

    if ((ret = malloc( count * sizeof(*ret) )))
    {
        for (i = j = 0; j < count; i++)
        {
            if (cache->state[i] != DIR_CACHE_STATE_RELEASED) continue;
            cache->state[i] = DIR_CACHE_STATE_FREE;
            ret[j++] = i;
        }
        *size = count * sizeof(*ret);
    }
    return ret;
}

/* allocate a new client-side directory cache entry */
static int alloc_dir_cache_entry( struct dir *dir, struct process *process )
{
    unsigned int i = 0;
    struct dir_cache *cache = process->dir_cache;

    if (cache)
        for (i = 0; i < cache->count; i++)
            if (cache->state[i] == DIR_CACHE_STATE_FREE) goto found;

    if (!cache || cache->count == cache->size)
    {
        unsigned int size = cache ? cache->size * 2 : 256;
        if (!(cache = realloc( cache, offsetof( struct dir_cache, state[size] ))))
        {
            set_error( STATUS_NO_MEMORY );
            return -1;
        }
        process->dir_cache = cache;
        cache->size = size;
    }
    cache->count = i + 1;

found:
    cache->state[i] = DIR_CACHE_STATE_INUSE;
    return i;
}

/* release a directory cache entry; it will be freed on the client side on the next cache request */
static void release_dir_cache_entry( struct dir *dir )
{
    struct dir_cache *cache;

    if (!dir->client_process) return;
    cache = dir->client_process->dir_cache;
    cache->state[dir->client_entry] = DIR_CACHE_STATE_RELEASED;
    release_object( dir->client_process );
    dir->client_process = NULL;
}
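
/* translate the Win32 notification filter into dnotify flags and (re)arm
 * F_NOTIFY on the directory's fd (no-op on platforms without dnotify) */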
static void dnotify_adjust_changes( struct dir *dir )
{
#if defined(F_SETSIG) && defined(F_NOTIFY)
    int fd = get_unix_fd( dir->fd );
    unsigned int filter = dir->filter;
    unsigned int val;

    if ( 0 > fcntl( fd, F_SETSIG, SIGIO) )
        return;

    val = DN_MULTISHOT;
    if (filter & FILE_NOTIFY_CHANGE_FILE_NAME)
        val |= DN_RENAME | DN_DELETE | DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_DIR_NAME)
        val |= DN_RENAME | DN_DELETE | DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_ATTRIBUTES)
        val |= DN_ATTRIB;
    if (filter & FILE_NOTIFY_CHANGE_SIZE)
        val |= DN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_WRITE)
        val |= DN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_ACCESS)
        val |= DN_ACCESS;
    if (filter & FILE_NOTIFY_CHANGE_CREATION)
        val |= DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_SECURITY)
        val |= DN_ATTRIB;
    fcntl( fd, F_NOTIFY, val );
#endif
}

/* insert change in the global list */
static inline void insert_change( struct dir *dir )
{
    sigset_t sigset;

    sigemptyset( &sigset );
    sigaddset( &sigset, SIGIO );
    sigprocmask( SIG_BLOCK, &sigset, NULL );
    list_add_head( &change_list, &dir->entry );
    sigprocmask( SIG_UNBLOCK, &sigset, NULL );
}

/* remove change from the global list */
static inline void remove_change( struct dir *dir )
{
    sigset_t sigset;

    sigemptyset( &sigset );
    sigaddset( &sigset, SIGIO );
    sigprocmask( SIG_BLOCK, &sigset, NULL );
    list_remove( &dir->entry );
    sigprocmask( SIG_UNBLOCK, &sigset, NULL );
}

static void dir_dump( struct object *obj, int verbose )
{
    struct dir *dir = (struct dir *)obj;
    assert( obj->ops == &dir_ops );
    fprintf( stderr, "Dirfile fd=%p filter=%08x\n", dir->fd, dir->filter );
}

/* enter here directly from SIGIO signal handler */
void do_change_notify( int unix_fd )
{
    struct dir *dir;

    /* FIXME: this is O(n) ... probably can be improved */
    LIST_FOR_EACH_ENTRY( dir, &change_list, struct dir, entry )
    {
        if (get_unix_fd( dir->fd ) != unix_fd) continue;
        dir->notified = 1;
        break;
    }
}

/* SIGIO callback, called synchronously with the poll loop */
void sigio_callback(void)
{
    struct dir *dir;

    LIST_FOR_EACH_ENTRY( dir, &change_list, struct dir, entry )
    {
        if (!dir->notified) continue;
        dir->notified = 0;
        fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );
    }
}

static struct fd *dir_get_fd( struct object *obj )
{
    struct dir *dir = (struct dir *)obj;
    assert( obj->ops == &dir_ops );
    return (struct fd *)grab_object( dir->fd );
}

static int get_dir_unix_fd( struct dir *dir )
{
    return get_unix_fd( dir->fd );
}
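
/* return the directory's security descriptor, regenerating it from the current
 * Unix mode and owner when they have changed since it was last built */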
static struct security_descriptor *dir_get_sd( struct object *obj )
{
    struct dir *dir = (struct dir *)obj;
    int unix_fd;
    struct stat st;
    struct security_descriptor *sd;

    assert( obj->ops == &dir_ops );

    unix_fd = get_dir_unix_fd( dir );

    if (unix_fd == -1 || fstat( unix_fd, &st ) == -1)
        return obj->sd;

    /* mode and uid the same? if so, no need to re-generate security descriptor */
    if (obj->sd &&
        (st.st_mode & (S_IRWXU|S_IRWXO)) == (dir->mode & (S_IRWXU|S_IRWXO)) &&
        (st.st_uid == dir->uid))
        return obj->sd;

    sd = mode_to_sd( st.st_mode,
                     security_unix_uid_to_sid( st.st_uid ),
                     token_get_primary_group( current->process->token ));
    if (!sd) return obj->sd;

    dir->mode = st.st_mode;
    dir->uid = st.st_uid;
    free( obj->sd );
    obj->sd = sd;
    return sd;
}

static int dir_set_sd( struct object *obj, const struct security_descriptor *sd,
                       unsigned int set_info )
{
    struct dir *dir = (struct dir *)obj;
    const struct sid *owner;
    struct stat st;
    mode_t mode;
    int unix_fd;

    assert( obj->ops == &dir_ops );

    unix_fd = get_dir_unix_fd( dir );

    if (unix_fd == -1 || fstat( unix_fd, &st ) == -1) return 1;

    if (set_info & OWNER_SECURITY_INFORMATION)
    {
        owner = sd_get_owner( sd );
        if (!owner)
        {
            set_error( STATUS_INVALID_SECURITY_DESCR );
            return 0;
        }
        if (!obj->sd || !equal_sid( owner, sd_get_owner( obj->sd ) ))
        {
            /* FIXME: get Unix uid and call fchown */
        }
    }
    else if (obj->sd)
        owner = sd_get_owner( obj->sd );
    else
        owner = token_get_user( current->process->token );

    if (set_info & DACL_SECURITY_INFORMATION)
    {
        /* keep the bits that we don't map to access rights in the ACL */
        mode = st.st_mode & (S_ISUID|S_ISGID|S_ISVTX);
        mode |= sd_to_mode( sd, owner );

        if (((st.st_mode ^ mode) & (S_IRWXU|S_IRWXG|S_IRWXO)) && fchmod( unix_fd, mode ) == -1)
        {
            file_set_error();
            return 0;
        }
    }
    return 1;
}

static struct change_record *get_first_change_record( struct dir *dir )
{
    struct list *ptr = list_head( &dir->change_records );
    if (!ptr) return NULL;
    list_remove( ptr );
    return LIST_ENTRY( ptr, struct change_record, entry );
}

static int dir_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct dir *dir = (struct dir *)obj;

    if (obj->handle_count == 1) release_dir_cache_entry( dir ); /* closing last handle, release cache */
    return 1;  /* ok to close */
}

static void dir_destroy( struct object *obj )
{
    struct change_record *record;
    struct dir *dir = (struct dir *)obj;
    assert (obj->ops == &dir_ops);

    if (dir->filter)
        remove_change( dir );

    if (dir->inode)
    {
        list_remove( &dir->in_entry );
        free_inode( dir->inode );
    }

    while ((record = get_first_change_record( dir ))) free( record );

    release_dir_cache_entry( dir );
    release_object( dir->fd );

    if (inotify_fd && list_empty( &change_list ))
    {
        release_object( inotify_fd );
        inotify_fd = NULL;
    }
}

struct dir *get_dir_obj( struct process *process, obj_handle_t handle, unsigned int access )
{
    return (struct dir *)get_handle_obj( process, handle, access, &dir_ops );
}

static int dir_get_poll_events( struct fd *fd )
{
    return 0;
}

static enum server_fd_type dir_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DIR;
}

#ifdef HAVE_SYS_INOTIFY_H

#define HASH_SIZE 31

struct inode {
    struct list ch_entry;    /* entry in the children list */
    struct list children;    /* children of this inode */
    struct inode *parent;    /* parent of this inode */
    struct list dirs;        /* directory handles watching this inode */
    struct list ino_entry;   /* entry in the inode hash */
    struct list wd_entry;    /* entry in the watch descriptor hash */
    dev_t dev;               /* device number */
    ino_t ino;               /* device's inode number */
    int wd;                  /* inotify's watch descriptor */
    char *name;              /* basename name of the inode */
};

static struct list inode_hash[ HASH_SIZE ];
static struct list wd_hash[ HASH_SIZE ];

static int inotify_add_dir( char *path, unsigned int filter );

static struct inode *inode_from_wd( int wd )
{
    struct list *bucket = &wd_hash[ wd % HASH_SIZE ];
    struct inode *inode;

    LIST_FOR_EACH_ENTRY( inode, bucket, struct inode, wd_entry )
        if (inode->wd == wd)
            return inode;

    return NULL;
}

static inline struct list *get_hash_list( dev_t dev, ino_t ino )
{
    return &inode_hash[ (ino ^ dev) % HASH_SIZE ];
}

static struct inode *find_inode( dev_t dev, ino_t ino )
{
    struct list *bucket = get_hash_list( dev, ino );
    struct inode *inode;

    LIST_FOR_EACH_ENTRY( inode, bucket, struct inode, ino_entry )
        if (inode->ino == ino && inode->dev == dev)
            return inode;

    return NULL;
}

static struct inode *create_inode( dev_t dev, ino_t ino )
{
    struct inode *inode;

    inode = malloc( sizeof *inode );
    if (inode)
    {
        list_init( &inode->children );
        list_init( &inode->dirs );
        inode->ino = ino;
        inode->dev = dev;
        inode->wd = -1;
        inode->parent = NULL;
        inode->name = NULL;
        list_add_tail( get_hash_list( dev, ino ), &inode->ino_entry );
    }
    return inode;
}

static struct inode *get_inode( dev_t dev, ino_t ino )
{
    struct inode *inode;

    inode = find_inode( dev, ino );
    if (inode)
        return inode;

    return create_inode( dev, ino );
}

static void inode_set_wd( struct inode *inode, int wd )
{
    if (inode->wd != -1)
        list_remove( &inode->wd_entry );
    inode->wd = wd;
    list_add_tail( &wd_hash[ wd % HASH_SIZE ], &inode->wd_entry );
}

static void inode_set_name( struct inode *inode, const char *name )
{
    free (inode->name);
    inode->name = name ? strdup( name ) : NULL;
}
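
/* drop an inode that may no longer be needed: free unwatched children, and if
 * no directory handle is still watching this inode, remove its inotify watch,
 * unhook it from its parent and the hash tables, and free it */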
static void free_inode( struct inode *inode )
{
    int subtree = 0, watches = 0;
    struct inode *tmp, *next;
    struct dir *dir;

    LIST_FOR_EACH_ENTRY( dir, &inode->dirs, struct dir, in_entry )
    {
        subtree |= dir->subtree;
        watches++;
    }

    if (!subtree && !inode->parent)
    {
        LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &inode->children,
                                  struct inode, ch_entry )
        {
            assert( tmp != inode );
            assert( tmp->parent == inode );
            free_inode( tmp );
        }
    }

    if (watches)
        return;

    if (inode->parent)
        list_remove( &inode->ch_entry );

    /* disconnect remaining children from the parent */
    LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &inode->children, struct inode, ch_entry )
    {
        list_remove( &tmp->ch_entry );
        tmp->parent = NULL;
    }

    if (inode->wd != -1)
    {
        inotify_rm_watch( get_unix_fd( inotify_fd ), inode->wd );
        list_remove( &inode->wd_entry );
    }

    list_remove( &inode->ino_entry );
    free( inode->name );
    free( inode );
}
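
/* get (or create) the inode for dev/ino, link it under the given parent if it
 * is not already parented, and record its name */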
static struct inode *inode_add( struct inode *parent,
                                dev_t dev, ino_t ino, const char *name )
{
    struct inode *inode;

    inode = get_inode( dev, ino );
    if (!inode)
        return NULL;

    if (!inode->parent)
    {
        list_add_tail( &parent->children, &inode->ch_entry );
        inode->parent = parent;
        assert( inode != parent );
    }
    inode_set_name( inode, name );

    return inode;
}

static struct inode *inode_from_name( struct inode *inode, const char *name )
{
    struct inode *i;

    LIST_FOR_EACH_ENTRY( i, &inode->children, struct inode, ch_entry )
        if (i->name && !strcmp( i->name, name ))
            return i;
    return NULL;
}

static int inotify_get_poll_events( struct fd *fd );
static void inotify_poll_event( struct fd *fd, int event );

static const struct fd_ops inotify_fd_ops =
{
    inotify_get_poll_events,     /* get_poll_events */
    inotify_poll_event,          /* poll_event */
    NULL,                        /* flush */
    NULL,                        /* get_fd_type */
    NULL,                        /* ioctl */
    NULL,                        /* queue_async */
    NULL                         /* reselect_async */
};

static int inotify_get_poll_events( struct fd *fd )
{
    return POLLIN;
}

static void inotify_do_change_notify( struct dir *dir, unsigned int action,
                                      unsigned int cookie, const char *relpath )
{
    struct change_record *record;

    assert( dir->obj.ops == &dir_ops );

    if (dir->want_data)
    {
        size_t len = strlen(relpath);
        record = malloc( offsetof(struct change_record, event.name[len]) );
        if (!record)
            return;

        record->cookie = cookie;
        record->event.action = action;
        memcpy( record->event.name, relpath, len );
        record->event.len = len;

        list_add_tail( &dir->change_records, &record->entry );
    }

    fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );
}
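
/* translate an inotify event mask into the Win32 FILE_NOTIFY_CHANGE_* bits it
 * can satisfy, distinguishing file-name from dir-name changes via IN_ISDIR */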
static unsigned int filter_from_event( struct inotify_event *ie )
{
    unsigned int filter = 0;

    if (ie->mask & (IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE | IN_CREATE))
        filter |= FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME;
    if (ie->mask & IN_MODIFY)
        filter |= FILE_NOTIFY_CHANGE_SIZE | FILE_NOTIFY_CHANGE_LAST_WRITE | FILE_NOTIFY_CHANGE_LAST_ACCESS;
    if (ie->mask & IN_ATTRIB)
        filter |= FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY;
    if (ie->mask & IN_CREATE)
        filter |= FILE_NOTIFY_CHANGE_CREATION;

    if (ie->mask & IN_ISDIR)
        filter &= ~FILE_NOTIFY_CHANGE_FILE_NAME;
    else
        filter &= ~FILE_NOTIFY_CHANGE_DIR_NAME;

    return filter;
}

/* scan up the parent directories for watches */
static unsigned int filter_from_inode( struct inode *inode, int is_parent )
{
    unsigned int filter = 0;
    struct dir *dir;

    /* combine filters from parents watching subtrees */
    while (inode)
    {
        LIST_FOR_EACH_ENTRY( dir, &inode->dirs, struct dir, in_entry )
            if (dir->subtree || !is_parent)
                filter |= dir->filter;
        is_parent = 1;
        inode = inode->parent;
    }

    return filter;
}
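
/* build a Unix path to an inode by walking up to the nearest ancestor that has
 * an open directory handle, which is addressed through /proc/self/fd; sz is
 * the extra space to reserve for the caller */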
static char *inode_get_path( struct inode *inode, int sz )
{
    struct list *head;
    char *path;
    int len;

    if (!inode)
        return NULL;

    head = list_head( &inode->dirs );
    if (head)
    {
        int unix_fd = get_unix_fd( LIST_ENTRY( head, struct dir, in_entry )->fd );
        path = malloc ( 32 + sz );
        if (path)
            sprintf( path, "/proc/self/fd/%u/", unix_fd );
        return path;
    }

    if (!inode->name)
        return NULL;

    len = strlen( inode->name );
    path = inode_get_path( inode->parent, sz + len + 1 );
    if (!path)
        return NULL;

    strcat( path, inode->name );
    strcat( path, "/" );

    return path;
}
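
/* a directory was created under a watched inode: if an ancestor watches the
 * subtree, add the new directory to the inode tree and start watching it */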
static void inode_check_dir( struct inode *parent, const char *name )
{
    char *path;
    unsigned int filter;
    struct inode *inode;
    struct stat st;
    int wd = -1;

    path = inode_get_path( parent, strlen(name) );
    if (!path)
        return;

    strcat( path, name );

    if (stat( path, &st ) < 0)
        goto end;

    filter = filter_from_inode( parent, 1 );
    if (!filter)
        goto end;

    inode = inode_add( parent, st.st_dev, st.st_ino, name );
    if (!inode || inode->wd != -1)
        goto end;

    wd = inotify_add_dir( path, filter );
    if (wd != -1)
        inode_set_wd( inode, wd );
    else
        free_inode( inode );

end:
    free( path );
}
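
/* prepend a path segment (adding a '/' separator when a path already exists)
 * to the relative path being built; returns 0 on allocation failure */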
static int prepend( char **path, const char *segment )
{
    int extra;
    char *p;

    extra = strlen( segment ) + 1;
    if (*path)
    {
        int len = strlen( *path ) + 1;
        p = realloc( *path, len + extra );
        if (!p) return 0;
        memmove( &p[ extra ], p, len );
        p[ extra - 1 ] = '/';
        memcpy( p, segment, extra - 1 );
    }
    else
    {
        p = malloc( extra );
        if (!p) return 0;
        memcpy( p, segment, extra );
    }
    *path = p;
    return 1;
}
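
/* dispatch a single inotify event to every directory watch interested in it */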
static void inotify_notify_all( struct inotify_event *ie )
{
    unsigned int filter, action;
    struct inode *inode, *i;
    char *path = NULL;
    struct dir *dir;

    inode = inode_from_wd( ie->wd );
    if (!inode)
    {
        fprintf( stderr, "no inode matches %d\n", ie->wd);
        return;
    }

    filter = filter_from_event( ie );

    if (ie->mask & IN_CREATE)
    {
        if (ie->mask & IN_ISDIR)
            inode_check_dir( inode, ie->name );

        action = FILE_ACTION_ADDED;
    }
    else if (ie->mask & IN_DELETE)
        action = FILE_ACTION_REMOVED;
    else if (ie->mask & IN_MOVED_FROM)
        action = FILE_ACTION_RENAMED_OLD_NAME;
    else if (ie->mask & IN_MOVED_TO)
        action = FILE_ACTION_RENAMED_NEW_NAME;
    else
        action = FILE_ACTION_MODIFIED;

    /*
     * Work our way up the inode hierarchy
     *  extending the relative path as we go
     *  and notifying all recursive watches.
     */
    if (!prepend( &path, ie->name ))
        return;

    for (i = inode; i; i = i->parent)
    {
        LIST_FOR_EACH_ENTRY( dir, &i->dirs, struct dir, in_entry )
            if ((filter & dir->filter) && (i==inode || dir->subtree))
                inotify_do_change_notify( dir, action, ie->cookie, path );

        if (!i->name || !prepend( &path, i->name ))
            break;
    }
    free( path );

    if (ie->mask & IN_DELETE)
    {
        i = inode_from_name( inode, ie->name );
        if (i)
            free_inode( i );
    }
}
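
/* the inotify fd is readable: read a batch of variable-length events and hand
 * each one that carries a name to inotify_notify_all() */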
static void inotify_poll_event( struct fd *fd, int event )
{
    int r, ofs, unix_fd;
    char buffer[0x1000];
    struct inotify_event *ie;

    unix_fd = get_unix_fd( fd );
    r = read( unix_fd, buffer, sizeof buffer );
    if (r < 0)
    {
        fprintf(stderr,"inotify_poll_event(): inotify read failed!\n");
        return;
    }

    for( ofs = 0; ofs < r - offsetof(struct inotify_event, name); )
    {
        ie = (struct inotify_event*) &buffer[ofs];
        ofs += offsetof( struct inotify_event, name[ie->len] );
        if (ofs > r) break;
        if (ie->len) inotify_notify_all( ie );
    }
}

static inline struct fd *create_inotify_fd( void )
{
    int unix_fd;

    unix_fd = inotify_init();
    if (unix_fd<0)
        return NULL;
    return create_anonymous_fd( &inotify_fd_ops, unix_fd, NULL, 0 );
}
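
/* translate the Win32 notification filter into an inotify event mask */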
static int map_flags( unsigned int filter )
{
    unsigned int mask;

    /* always watch these so we can track subdirectories in recursive watches */
    mask = (IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF);

    if (filter & FILE_NOTIFY_CHANGE_ATTRIBUTES)
        mask |= IN_ATTRIB;
    if (filter & FILE_NOTIFY_CHANGE_SIZE)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_WRITE)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_ACCESS)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_SECURITY)
        mask |= IN_ATTRIB;

    return mask;
}

static int inotify_add_dir( char *path, unsigned int filter )
{
    int wd = inotify_add_watch( get_unix_fd( inotify_fd ),
                                path, map_flags( filter ) );
    if (wd != -1)
        set_fd_events( inotify_fd, POLLIN );
    return wd;
}

static int init_inotify( void )
{
    int i;

    if (inotify_fd)
        return 1;

    inotify_fd = create_inotify_fd();
    if (!inotify_fd)
        return 0;

    for (i=0; i<HASH_SIZE; i++)
    {
        list_init( &inode_hash[i] );
        list_init( &wd_hash[i] );
    }

    return 1;
}
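
/* (re)register the inotify watch for a directory handle, creating its inode
 * record on first use; the watch mask combines the filters of every handle
 * watching this inode */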
static int inotify_adjust_changes( struct dir *dir )
{
    unsigned int filter;
    struct inode *inode;
    struct stat st;
    char path[32];
    int wd, unix_fd;

    if (!inotify_fd)
        return 0;

    unix_fd = get_unix_fd( dir->fd );

    inode = dir->inode;
    if (!inode)
    {
        /* check if this fd is already being watched */
        if (-1 == fstat( unix_fd, &st ))
            return 0;

        inode = get_inode( st.st_dev, st.st_ino );
        if (!inode)
            inode = create_inode( st.st_dev, st.st_ino );
        if (!inode)
            return 0;
        list_add_tail( &inode->dirs, &dir->in_entry );
        dir->inode = inode;
    }

    filter = filter_from_inode( inode, 0 );

    sprintf( path, "/proc/self/fd/%u", unix_fd );
    wd = inotify_add_dir( path, filter );
    if (wd == -1) return 0;

    inode_set_wd( inode, wd );

    return 1;
}
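
/* read a symlink (e.g. /proc/self/fd/N) and return a copy of the last
 * component of its target, growing the buffer until the target fits */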
static char *get_basename( const char *link )
{
    char *buffer, *name = NULL;
    int r, n = 0x100;

    while (1)
    {
        buffer = malloc( n );
        if (!buffer) return NULL;

        r = readlink( link, buffer, n );
        if (r < 0)
            break;

        if (r < n)
        {
            name = buffer;
            break;
        }
        free( buffer );
        n *= 2;
    }

    if (name)
    {
        while (r > 0 && name[ r - 1 ] == '/' )
            r--;
        name[ r ] = 0;

        name = strrchr( name, '/' );
        if (name)
            name = strdup( &name[1] );
    }

    free( buffer );
    return name;
}
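
/* when a new directory handle is opened, check whether its parent directory is
 * already watched recursively; if so, hook the new directory into the existing
 * inode tree and start watching it too */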
static int dir_add_to_existing_notify( struct dir *dir )
{
    struct inode *inode, *parent;
    unsigned int filter = 0;
    struct stat st, st_new;
    char link[35], *name;
    int wd, unix_fd;

    if (!inotify_fd)
        return 0;

    unix_fd = get_unix_fd( dir->fd );

    /* check if it's in the list of inodes we want to watch */
    if (-1 == fstat( unix_fd, &st_new ))
        return 0;
    inode = find_inode( st_new.st_dev, st_new.st_ino );
    if (inode)
        return 0;

    /* lookup the parent */
    sprintf( link, "/proc/self/fd/%u/..", unix_fd );
    if (-1 == stat( link, &st ))
        return 0;

    /*
     * If there's no parent, stop.  We could keep going adding
     *  ../ to the path until we hit the root of the tree or
     *  find a recursively watched ancestor.
     * Assume it's too expensive to search up the tree for now.
     */
    parent = find_inode( st.st_dev, st.st_ino );
    if (!parent)
        return 0;

    if (parent->wd == -1)
        return 0;

    filter = filter_from_inode( parent, 1 );
    if (!filter)
        return 0;

    sprintf( link, "/proc/self/fd/%u", unix_fd );
    name = get_basename( link );
    if (!name)
        return 0;
    inode = inode_add( parent, st_new.st_dev, st_new.st_ino, name );
    free( name );
    if (!inode)
        return 0;

    /* Couldn't find this inode at the start of the function, must be new */
    assert( inode->wd == -1 );

    wd = inotify_add_dir( link, filter );
    if (wd != -1)
        inode_set_wd( inode, wd );

    return 1;
}

#else

static int init_inotify( void )
{
    return 0;
}

static int inotify_adjust_changes( struct dir *dir )
{
    return 0;
}

static void free_inode( struct inode *inode )
{
    assert( 0 );
}

static int dir_add_to_existing_notify( struct dir *dir )
{
    return 0;
}

#endif  /* HAVE_SYS_INOTIFY_H */
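
/* create the server object used for change notifications on an open directory fd */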
struct object *create_dir_obj( struct fd *fd, unsigned int access, mode_t mode )
{
    struct dir *dir;

    dir = alloc_object( &dir_ops );
    if (!dir)
        return NULL;

    list_init( &dir->change_records );
    dir->filter = 0;
    dir->notified = 0;
    dir->want_data = 0;
    dir->inode = NULL;
    grab_object( fd );
    dir->fd = fd;
    dir->mode = mode;
    dir->uid = ~(uid_t)0;
    dir->client_process = NULL;
    set_fd_user( fd, &dir_fd_ops, &dir->obj );

    dir_add_to_existing_notify( dir );

    return &dir->obj;
}

/* retrieve (or allocate) the client-side directory cache entry */
DECL_HANDLER(get_directory_cache_entry)
{
    struct dir *dir;
    int *free_entries;
    data_size_t free_size;

    if (!(dir = get_dir_obj( current->process, req->handle, 0 ))) return;

    if (!dir->client_process)
    {
        if ((dir->client_entry = alloc_dir_cache_entry( dir, current->process )) == -1) goto done;
        dir->client_process = (struct process *)grab_object( current->process );
    }

    if (dir->client_process == current->process) reply->entry = dir->client_entry;
    else set_error( STATUS_SHARING_VIOLATION );

done:  /* allow freeing entries even on failure */
    free_size = get_reply_max_size();
    free_entries = get_free_dir_cache_entries( current->process, &free_size );
    if (free_entries) set_reply_data_ptr( free_entries, free_size );
    release_object( dir );
}

/* enable change notifications for a directory */
DECL_HANDLER(read_directory_changes)
{
    struct dir *dir;
    struct async *async;

    if (!req->filter)
    {
        set_error(STATUS_INVALID_PARAMETER);
        return;
    }

    dir = get_dir_obj( current->process, req->async.handle, 0 );
    if (!dir)
        return;

    /* requests don't time out */
    if (!(async = create_async( dir->fd, current, &req->async, NULL ))) goto end;
    fd_queue_async( dir->fd, async, ASYNC_TYPE_WAIT );

    /* assign it once */
    if (!dir->filter)
    {
        init_inotify();
        insert_change( dir );
        dir->filter = req->filter;
        dir->subtree = req->subtree;
        dir->want_data = req->want_data;
    }

    /* if there's already a change in the queue, send it */
    if (!list_empty( &dir->change_records ))
        fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );

    /* setup the real notification */
    if (!inotify_adjust_changes( dir ))
        dnotify_adjust_changes( dir );

    set_error(STATUS_PENDING);

    release_object( async );
end:
    release_object( dir );
}
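
/* return the queued change events for a directory to the client */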
DECL_HANDLER(read_change)
{
    struct change_record *record, *next;
    struct dir *dir;
    struct list events;
    char *data, *event;
    int size = 0;

    dir = get_dir_obj( current->process, req->handle, 0 );
    if (!dir)
        return;

    list_init( &events );
    list_move_tail( &events, &dir->change_records );
    release_object( dir );

    if (list_empty( &events ))
    {
        set_error( STATUS_NO_DATA_DETECTED );
        return;
    }

    LIST_FOR_EACH_ENTRY( record, &events, struct change_record, entry )
    {
        size += (offsetof(struct filesystem_event, name[record->event.len])
                + sizeof(int)-1) / sizeof(int) * sizeof(int);
    }

    if (size > get_reply_max_size())
        set_error( STATUS_BUFFER_TOO_SMALL );
    else if ((data = mem_alloc( size )) != NULL)
    {
        event = data;
        LIST_FOR_EACH_ENTRY( record, &events, struct change_record, entry )
        {
            data_size_t len = offsetof( struct filesystem_event, name[record->event.len] );

            /* FIXME: rename events are sometimes reported as delete/create */
            if (record->event.action == FILE_ACTION_RENAMED_OLD_NAME)
            {
                struct list *elem = list_next( &events, &record->entry );
                if (elem)
                    next = LIST_ENTRY(elem, struct change_record, entry);

                if (elem && next->cookie == record->cookie)
                    next->cookie = 0;
                else
                    record->event.action = FILE_ACTION_REMOVED;
            }
            else if (record->event.action == FILE_ACTION_RENAMED_NEW_NAME && record->cookie)
                record->event.action = FILE_ACTION_ADDED;

            memcpy( event, &record->event, len );
            event += len;
            if (len % sizeof(int))
            {
                memset( event, 0, sizeof(int) - len % sizeof(int) );
                event += sizeof(int) - len % sizeof(int);
            }
        }
        set_reply_data_ptr( data, size );
    }

    LIST_FOR_EACH_ENTRY_SAFE( record, next, &events, struct change_record, entry )
    {
        list_remove( &record->entry );
        free( record );
    }
}