change.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302
  1. /*
  2. * Server-side change notification management
  3. *
  4. * Copyright (C) 1998 Alexandre Julliard
  5. * Copyright (C) 2006 Mike McCormack
  6. *
  7. * This library is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * This library is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with this library; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
  20. */
  21. #include "config.h"
  22. #include <assert.h>
  23. #include <fcntl.h>
  24. #include <stdio.h>
  25. #include <stdlib.h>
  26. #include <signal.h>
  27. #include <sys/stat.h>
  28. #include <sys/types.h>
  29. #include <limits.h>
  30. #include <dirent.h>
  31. #include <errno.h>
  32. #include <unistd.h>
  33. #include <poll.h>
  34. #ifdef HAVE_SYS_INOTIFY_H
  35. #include <sys/inotify.h>
  36. #endif
  37. #include "ntstatus.h"
  38. #define WIN32_NO_STATUS
  39. #include "windef.h"
  40. #include "file.h"
  41. #include "handle.h"
  42. #include "thread.h"
  43. #include "request.h"
  44. #include "process.h"
  45. #include "security.h"
  46. #include "winternl.h"
  47. /* dnotify support */
  48. #ifdef linux
  49. #ifndef F_NOTIFY
  50. #define F_NOTIFY 1026
  51. #define DN_ACCESS 0x00000001 /* File accessed */
  52. #define DN_MODIFY 0x00000002 /* File modified */
  53. #define DN_CREATE 0x00000004 /* File created */
  54. #define DN_DELETE 0x00000008 /* File removed */
  55. #define DN_RENAME 0x00000010 /* File renamed */
  56. #define DN_ATTRIB 0x00000020 /* File changed attributes */
  57. #define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
  58. #endif
  59. #endif
/* inotify support */
struct inode;
static void free_inode( struct inode *inode );

/* the single inotify descriptor shared by all watches (NULL when unused) */
static struct fd *inotify_fd;

/* one queued change event, stored per directory until the client reads it */
struct change_record {
    struct list entry;              /* entry in the dir's change_records list */
    unsigned int cookie;            /* inotify cookie pairing rename old/new events */
    struct filesystem_event event;  /* variable-length event data returned to the client */
};
/* a directory handle with an active (or potential) change notification */
struct dir
{
    struct object obj;      /* object header */
    struct fd *fd;          /* file descriptor to the directory */
    mode_t mode;            /* file stat.st_mode */
    uid_t uid;              /* file stat.st_uid */
    struct list entry;      /* entry in global change notifications list */
    unsigned int filter;    /* notification filter */
    volatile int notified;  /* SIGIO counter */
    int want_data;          /* return change data */
    int subtree;            /* do we want to watch subdirectories? */
    struct list change_records;   /* data for the change */
    struct list in_entry;   /* entry in the inode dirs list */
    struct inode *inode;    /* inode of the associated directory */
    struct process *client_process;  /* client process that has a cache for this directory */
    int client_entry;       /* entry in client process cache */
};
static struct fd *dir_get_fd( struct object *obj );
static struct security_descriptor *dir_get_sd( struct object *obj );
static int dir_set_sd( struct object *obj, const struct security_descriptor *sd,
                       unsigned int set_info );
static void dir_dump( struct object *obj, int verbose );
static int dir_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void dir_destroy( struct object *obj );

/* object operations for change-notification directory objects */
static const struct object_ops dir_ops =
{
    sizeof(struct dir),       /* size */
    &file_type,               /* type */
    dir_dump,                 /* dump */
    add_queue,                /* add_queue */
    remove_queue,             /* remove_queue */
    default_fd_signaled,      /* signaled */
    no_satisfied,             /* satisfied */
    no_signal,                /* signal */
    dir_get_fd,               /* get_fd */
    default_map_access,       /* map_access */
    dir_get_sd,               /* get_sd */
    dir_set_sd,               /* set_sd */
    no_get_full_name,         /* get_full_name */
    no_lookup_name,           /* lookup_name */
    no_link_name,             /* link_name */
    NULL,                     /* unlink_name */
    no_open_file,             /* open_file */
    no_kernel_obj_list,       /* get_kernel_obj_list */
    default_fd_get_fast_sync, /* get_fast_sync */
    dir_close_handle,         /* close_handle */
    dir_destroy               /* destroy */
};
static int dir_get_poll_events( struct fd *fd );
static enum server_fd_type dir_get_fd_type( struct fd *fd );

/* fd operations for the directory handle itself */
static const struct fd_ops dir_fd_ops =
{
    dir_get_poll_events,       /* get_poll_events */
    default_poll_event,        /* poll_event */
    dir_get_fd_type,           /* get_fd_type */
    no_fd_read,                /* read */
    no_fd_write,               /* write */
    no_fd_flush,               /* flush */
    default_fd_get_file_info,  /* get_file_info */
    no_fd_get_volume_info,     /* get_volume_info */
    default_fd_ioctl,          /* ioctl */
    default_fd_cancel_async,   /* cancel_async */
    default_fd_queue_async,    /* queue_async */
    default_fd_reselect_async  /* reselect_async */
};
/* list of all directories that have an active change notification */
static struct list change_list = LIST_INIT(change_list);

/* per-process structure to keep track of cache entries on the client side */
struct dir_cache
{
    unsigned int size;       /* allocated number of state slots */
    unsigned int count;      /* number of slots in use so far */
    unsigned char state[1];  /* one dir_cache_state per client-side entry */
};

enum dir_cache_state
{
    DIR_CACHE_STATE_FREE,     /* slot is unused */
    DIR_CACHE_STATE_INUSE,    /* slot is owned by a live directory */
    DIR_CACHE_STATE_RELEASED  /* freed server-side, client not yet told */
};
/* return an array of cache entries that can be freed on the client side */
/* on input *size is the client's buffer size in bytes; on output it is the
 * byte size of the returned array; caller frees; NULL if nothing to release */
static int *get_free_dir_cache_entries( struct process *process, data_size_t *size )
{
    int *ret;
    struct dir_cache *cache = process->dir_cache;
    unsigned int i, j, count;

    if (!cache) return NULL;
    /* first pass: count released entries, capped by the client buffer size */
    for (i = count = 0; i < cache->count && count < *size / sizeof(*ret); i++)
        if (cache->state[i] == DIR_CACHE_STATE_RELEASED) count++;
    if (!count) return NULL;
    if ((ret = malloc( count * sizeof(*ret) )))
    {
        /* second pass: collect the indices and mark the slots free for reuse */
        for (i = j = 0; j < count; i++)
        {
            if (cache->state[i] != DIR_CACHE_STATE_RELEASED) continue;
            cache->state[i] = DIR_CACHE_STATE_FREE;
            ret[j++] = i;
        }
        *size = count * sizeof(*ret);
    }
    return ret;
}
/* allocate a new client-side directory cache entry */
/* returns the slot index, or -1 and sets STATUS_NO_MEMORY on failure */
static int alloc_dir_cache_entry( struct dir *dir, struct process *process )
{
    unsigned int i = 0;
    struct dir_cache *cache = process->dir_cache;

    /* reuse a freed slot if one exists */
    if (cache)
        for (i = 0; i < cache->count; i++)
            if (cache->state[i] == DIR_CACHE_STATE_FREE) goto found;

    if (!cache || cache->count == cache->size)
    {
        /* grow geometrically, starting at 256 entries */
        unsigned int size = cache ? cache->size * 2 : 256;
        if (!(cache = realloc( cache, offsetof( struct dir_cache, state[size] ))))
        {
            set_error( STATUS_NO_MEMORY );
            return -1;
        }
        process->dir_cache = cache;
        cache->size = size;
    }
    cache->count = i + 1;  /* i == old count here: appending a new slot */
found:
    cache->state[i] = DIR_CACHE_STATE_INUSE;
    return i;
}
/* release a directory cache entry; it will be freed on the client side on the next cache request */
static void release_dir_cache_entry( struct dir *dir )
{
    struct dir_cache *cache;

    if (!dir->client_process) return;  /* nothing cached for this dir */
    cache = dir->client_process->dir_cache;
    cache->state[dir->client_entry] = DIR_CACHE_STATE_RELEASED;
    release_object( dir->client_process );
    dir->client_process = NULL;
}
/* (re)arm the dnotify watch on the directory fd to match the current filter */
static void dnotify_adjust_changes( struct dir *dir )
{
#if defined(F_SETSIG) && defined(F_NOTIFY)
    int fd = get_unix_fd( dir->fd );
    unsigned int filter = dir->filter;
    unsigned int val;

    /* deliver notifications through SIGIO so the main loop picks them up */
    if ( 0 > fcntl( fd, F_SETSIG, SIGIO) )
        return;

    val = DN_MULTISHOT;  /* keep the watch armed after each event */
    if (filter & FILE_NOTIFY_CHANGE_FILE_NAME)
        val |= DN_RENAME | DN_DELETE | DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_DIR_NAME)
        val |= DN_RENAME | DN_DELETE | DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_ATTRIBUTES)
        val |= DN_ATTRIB;
    if (filter & FILE_NOTIFY_CHANGE_SIZE)
        val |= DN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_WRITE)
        val |= DN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_ACCESS)
        val |= DN_ACCESS;
    if (filter & FILE_NOTIFY_CHANGE_CREATION)
        val |= DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_SECURITY)
        val |= DN_ATTRIB;
    fcntl( fd, F_NOTIFY, val );
#endif
}
/* insert change in the global list */
static inline void insert_change( struct dir *dir )
{
    sigset_t sigset;

    /* block SIGIO while mutating the list: do_change_notify() walks it
     * from the signal handler */
    sigemptyset( &sigset );
    sigaddset( &sigset, SIGIO );
    sigprocmask( SIG_BLOCK, &sigset, NULL );
    list_add_head( &change_list, &dir->entry );
    sigprocmask( SIG_UNBLOCK, &sigset, NULL );
}
/* remove change from the global list */
static inline void remove_change( struct dir *dir )
{
    sigset_t sigset;

    /* block SIGIO while mutating the list: do_change_notify() walks it
     * from the signal handler */
    sigemptyset( &sigset );
    sigaddset( &sigset, SIGIO );
    sigprocmask( SIG_BLOCK, &sigset, NULL );
    list_remove( &dir->entry );
    sigprocmask( SIG_UNBLOCK, &sigset, NULL );
}
/* dump a directory object for debugging */
static void dir_dump( struct object *obj, int verbose )
{
    struct dir *dir = (struct dir *)obj;
    assert( obj->ops == &dir_ops );
    fprintf( stderr, "Dirfile fd=%p filter=%08x\n", dir->fd, dir->filter );
}
/* enter here directly from SIGIO signal handler */
void do_change_notify( int unix_fd )
{
    struct dir *dir;

    /* FIXME: this is O(n) ... probably can be improved */
    LIST_FOR_EACH_ENTRY( dir, &change_list, struct dir, entry )
    {
        if (get_unix_fd( dir->fd ) != unix_fd) continue;
        dir->notified = 1;  /* just flag it; sigio_callback() does the real work */
        break;
    }
}
/* SIGIO callback, called synchronously with the poll loop */
void sigio_callback(void)
{
    struct dir *dir;

    /* wake the waiters of every directory flagged by the signal handler */
    LIST_FOR_EACH_ENTRY( dir, &change_list, struct dir, entry )
    {
        if (!dir->notified) continue;
        dir->notified = 0;
        fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );
    }
}
/* return a new reference to the directory's fd object */
static struct fd *dir_get_fd( struct object *obj )
{
    struct dir *dir = (struct dir *)obj;
    assert( obj->ops == &dir_ops );
    return (struct fd *)grab_object( dir->fd );
}
/* fetch the underlying Unix file descriptor of the directory */
static int get_dir_unix_fd( struct dir *dir )
{
    return get_unix_fd( dir->fd );
}
/* retrieve the directory's security descriptor, regenerating it from the
 * Unix mode/owner when they changed since the last call */
static struct security_descriptor *dir_get_sd( struct object *obj )
{
    struct dir *dir = (struct dir *)obj;
    int unix_fd;
    struct stat st;
    struct security_descriptor *sd;

    assert( obj->ops == &dir_ops );

    unix_fd = get_dir_unix_fd( dir );

    if (unix_fd == -1 || fstat( unix_fd, &st ) == -1)
        return obj->sd;  /* can't stat: fall back to the cached descriptor */

    /* mode and uid the same? if so, no need to re-generate security descriptor */
    if (obj->sd &&
        (st.st_mode & (S_IRWXU|S_IRWXO)) == (dir->mode & (S_IRWXU|S_IRWXO)) &&
        (st.st_uid == dir->uid))
        return obj->sd;

    sd = mode_to_sd( st.st_mode,
                     security_unix_uid_to_sid( st.st_uid ),
                     token_get_primary_group( current->process->token ));
    if (!sd) return obj->sd;

    /* cache the values the descriptor was generated from */
    dir->mode = st.st_mode;
    dir->uid = st.st_uid;
    free( obj->sd );
    obj->sd = sd;
    return sd;
}
/* update the directory's security descriptor, mapping the DACL to Unix mode bits */
static int dir_set_sd( struct object *obj, const struct security_descriptor *sd,
                       unsigned int set_info )
{
    struct dir *dir = (struct dir *)obj;
    const struct sid *owner;
    struct stat st;
    mode_t mode;
    int unix_fd;

    assert( obj->ops == &dir_ops );

    unix_fd = get_dir_unix_fd( dir );

    if (unix_fd == -1 || fstat( unix_fd, &st ) == -1) return 1;

    if (set_info & OWNER_SECURITY_INFORMATION)
    {
        owner = sd_get_owner( sd );
        if (!owner)
        {
            set_error( STATUS_INVALID_SECURITY_DESCR );
            return 0;
        }
        if (!obj->sd || !equal_sid( owner, sd_get_owner( obj->sd ) ))
        {
            /* FIXME: get Unix uid and call fchown */
        }
    }
    else if (obj->sd)
        owner = sd_get_owner( obj->sd );
    else
        owner = token_get_owner( current->process->token );

    if (set_info & DACL_SECURITY_INFORMATION)
    {
        /* keep the bits that we don't map to access rights in the ACL */
        mode = st.st_mode & (S_ISUID|S_ISGID|S_ISVTX);
        mode |= sd_to_mode( sd, owner );

        /* only chmod when the permission bits actually changed */
        if (((st.st_mode ^ mode) & (S_IRWXU|S_IRWXG|S_IRWXO)) && fchmod( unix_fd, mode ) == -1)
        {
            file_set_error();
            return 0;
        }
    }
    return 1;
}
/* pop the oldest queued change record, or NULL if the queue is empty;
 * ownership passes to the caller, which must free it */
static struct change_record *get_first_change_record( struct dir *dir )
{
    struct list *ptr = list_head( &dir->change_records );
    if (!ptr) return NULL;
    list_remove( ptr );
    return LIST_ENTRY( ptr, struct change_record, entry );
}
/* close_handle hook: drop the client cache entry when the last handle goes away */
static int dir_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct dir *dir = (struct dir *)obj;

    if (obj->handle_count == 1) release_dir_cache_entry( dir ); /* closing last handle, release cache */
    return 1;  /* ok to close */
}
/* destroy the directory object, releasing watches, queued records and the fd */
static void dir_destroy( struct object *obj )
{
    struct change_record *record;
    struct dir *dir = (struct dir *)obj;
    assert (obj->ops == &dir_ops);

    if (dir->filter)
        remove_change( dir );

    if (dir->inode)
    {
        list_remove( &dir->in_entry );
        free_inode( dir->inode );
    }

    /* drain any change records the client never read */
    while ((record = get_first_change_record( dir ))) free( record );

    release_dir_cache_entry( dir );
    release_object( dir->fd );

    /* last watcher gone: shut down the shared inotify descriptor */
    if (inotify_fd && list_empty( &change_list ))
    {
        release_object( inotify_fd );
        inotify_fd = NULL;
    }
}
/* resolve a handle to a directory object, checking access rights */
struct dir *get_dir_obj( struct process *process, obj_handle_t handle, unsigned int access )
{
    return (struct dir *)get_handle_obj( process, handle, access, &dir_ops );
}
/* the directory fd itself is never polled for events */
static int dir_get_poll_events( struct fd *fd )
{
    return 0;
}
/* report the server fd type of this object */
static enum server_fd_type dir_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DIR;
}
#ifdef HAVE_SYS_INOTIFY_H

#define HASH_SIZE 31  /* buckets in the inode and watch-descriptor hashes */

/* an inode being watched through inotify; inodes form a tree mirroring the
 * watched directory hierarchy, needed for recursive (subtree) watches */
struct inode {
    struct list ch_entry;    /* entry in the parent's children list */
    struct list children;    /* children of this inode */
    struct inode *parent;    /* parent of this inode */
    struct list dirs;        /* directory handles watching this inode */
    struct list ino_entry;   /* entry in the inode hash */
    struct list wd_entry;    /* entry in the watch descriptor hash */
    dev_t dev;               /* device number */
    ino_t ino;               /* device's inode number */
    int wd;                  /* inotify's watch descriptor (-1 if none) */
    char *name;              /* basename name of the inode */
};

static struct list inode_hash[ HASH_SIZE ];  /* (dev, ino) -> inode */
static struct list wd_hash[ HASH_SIZE ];     /* watch descriptor -> inode */

static int inotify_add_dir( char *path, unsigned int filter );
/* look up the inode associated with an inotify watch descriptor */
static struct inode *inode_from_wd( int wd )
{
    struct list *bucket = &wd_hash[ wd % HASH_SIZE ];
    struct inode *inode;

    LIST_FOR_EACH_ENTRY( inode, bucket, struct inode, wd_entry )
        if (inode->wd == wd)
            return inode;

    return NULL;
}
/* hash bucket for a (device, inode-number) pair */
static inline struct list *get_hash_list( dev_t dev, ino_t ino )
{
    return &inode_hash[ (ino ^ dev) % HASH_SIZE ];
}
/* look up a tracked inode by (device, inode-number); NULL if not tracked */
static struct inode *find_inode( dev_t dev, ino_t ino )
{
    struct list *bucket = get_hash_list( dev, ino );
    struct inode *inode;

    LIST_FOR_EACH_ENTRY( inode, bucket, struct inode, ino_entry )
        if (inode->ino == ino && inode->dev == dev)
            return inode;

    return NULL;
}
/* allocate a new inode entry and link it into the hash; returns NULL on OOM */
static struct inode *create_inode( dev_t dev, ino_t ino )
{
    struct inode *inode;

    inode = malloc( sizeof *inode );
    if (inode)
    {
        list_init( &inode->children );
        list_init( &inode->dirs );
        inode->ino = ino;
        inode->dev = dev;
        inode->wd = -1;        /* no inotify watch yet */
        inode->parent = NULL;
        inode->name = NULL;
        list_add_tail( get_hash_list( dev, ino ), &inode->ino_entry );
    }
    return inode;
}
  459. static struct inode *get_inode( dev_t dev, ino_t ino )
  460. {
  461. struct inode *inode;
  462. inode = find_inode( dev, ino );
  463. if (inode)
  464. return inode;
  465. return create_inode( dev, ino );
  466. }
/* associate an inotify watch descriptor with an inode, rehashing it */
static void inode_set_wd( struct inode *inode, int wd )
{
    if (inode->wd != -1)
        list_remove( &inode->wd_entry );  /* drop the old hash entry first */
    inode->wd = wd;
    list_add_tail( &wd_hash[ wd % HASH_SIZE ], &inode->wd_entry );
}
/* replace the inode's cached basename (NULL is allowed) */
/* NOTE(review): a failed strdup silently leaves the name NULL — confirm
 * callers tolerate a missing name (inode_get_path returns NULL for it) */
static void inode_set_name( struct inode *inode, const char *name )
{
    free (inode->name);
    inode->name = name ? strdup( name ) : NULL;
}
/* release an inode when it is no longer needed: recursively frees the
 * children of an unwatched root, then frees the inode itself unless some
 * directory handle still watches it */
static void free_inode( struct inode *inode )
{
    int subtree = 0, watches = 0;
    struct inode *tmp, *next;
    struct dir *dir;

    /* see whether any directory handle still watches this inode */
    LIST_FOR_EACH_ENTRY( dir, &inode->dirs, struct dir, in_entry )
    {
        subtree |= dir->subtree;
        watches++;
    }

    /* a root with no subtree watches cannot need its children any more */
    if (!subtree && !inode->parent)
    {
        LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &inode->children,
                                  struct inode, ch_entry )
        {
            assert( tmp != inode );
            assert( tmp->parent == inode );
            free_inode( tmp );
        }
    }

    if (watches)
        return;  /* still in use: keep the inode itself alive */

    if (inode->parent)
        list_remove( &inode->ch_entry );

    /* disconnect remaining children from the parent */
    LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &inode->children, struct inode, ch_entry )
    {
        list_remove( &tmp->ch_entry );
        tmp->parent = NULL;
    }

    if (inode->wd != -1)
    {
        inotify_rm_watch( get_unix_fd( inotify_fd ), inode->wd );
        list_remove( &inode->wd_entry );
    }

    list_remove( &inode->ino_entry );
    free( inode->name );
    free( inode );
}
/* get or create the inode for (dev, ino), attach it under parent and
 * record its basename; returns NULL on OOM */
static struct inode *inode_add( struct inode *parent,
                                dev_t dev, ino_t ino, const char *name )
{
    struct inode *inode;

    inode = get_inode( dev, ino );
    if (!inode)
        return NULL;

    if (!inode->parent)
    {
        list_add_tail( &parent->children, &inode->ch_entry );
        inode->parent = parent;
        assert( inode != parent );
    }
    inode_set_name( inode, name );

    return inode;
}
/* find a direct child of an inode by its basename */
static struct inode *inode_from_name( struct inode *inode, const char *name )
{
    struct inode *i;

    LIST_FOR_EACH_ENTRY( i, &inode->children, struct inode, ch_entry )
        if (i->name && !strcmp( i->name, name ))
            return i;
    return NULL;
}
  542. static int inotify_get_poll_events( struct fd *fd );
  543. static void inotify_poll_event( struct fd *fd, int event );
  544. static const struct fd_ops inotify_fd_ops =
  545. {
  546. inotify_get_poll_events, /* get_poll_events */
  547. inotify_poll_event, /* poll_event */
  548. NULL, /* flush */
  549. NULL, /* get_fd_type */
  550. NULL, /* ioctl */
  551. NULL, /* queue_async */
  552. NULL /* reselect_async */
  553. };
/* the inotify fd is only ever polled for readability */
static int inotify_get_poll_events( struct fd *fd )
{
    return POLLIN;
}
/* queue a change record (when the client asked for data) and wake the
 * directory's async waiters */
static void inotify_do_change_notify( struct dir *dir, unsigned int action,
                                      unsigned int cookie, const char *relpath )
{
    struct change_record *record;

    assert( dir->obj.ops == &dir_ops );

    if (dir->want_data)
    {
        size_t len = strlen(relpath);
        record = malloc( offsetof(struct change_record, event.name[len]) );
        if (!record)
            return;  /* best effort: drop the event on OOM */

        record->cookie = cookie;
        record->event.action = action;
        memcpy( record->event.name, relpath, len );  /* name is not nul-terminated */
        record->event.len = len;

        list_add_tail( &dir->change_records, &record->entry );
    }
    fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );
}
  577. static unsigned int filter_from_event( struct inotify_event *ie )
  578. {
  579. unsigned int filter = 0;
  580. if (ie->mask & (IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE | IN_CREATE))
  581. filter |= FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME;
  582. if (ie->mask & IN_MODIFY)
  583. filter |= FILE_NOTIFY_CHANGE_SIZE | FILE_NOTIFY_CHANGE_LAST_WRITE | FILE_NOTIFY_CHANGE_LAST_ACCESS;
  584. if (ie->mask & IN_ATTRIB)
  585. filter |= FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY;
  586. if (ie->mask & IN_CREATE)
  587. filter |= FILE_NOTIFY_CHANGE_CREATION;
  588. if (ie->mask & IN_ISDIR)
  589. filter &= ~FILE_NOTIFY_CHANGE_FILE_NAME;
  590. else
  591. filter &= ~FILE_NOTIFY_CHANGE_DIR_NAME;
  592. return filter;
  593. }
/* scan up the parent directories for watches */
/* combine the filters of all watches interested in events at or below this
 * inode; is_parent is zero only for the inode the event occurred on */
static unsigned int filter_from_inode( struct inode *inode, int is_parent )
{
    unsigned int filter = 0;
    struct dir *dir;

    /* combine filters from parents watching subtrees */
    while (inode)
    {
        LIST_FOR_EACH_ENTRY( dir, &inode->dirs, struct dir, in_entry )
            if (dir->subtree || !is_parent)
                filter |= dir->filter;
        is_parent = 1;
        inode = inode->parent;
    }
    return filter;
}
/* build a path that names the given fd, with sz spare bytes for appending;
 * caller frees; returns NULL when the platform offers no way to do it */
static char *get_path_from_fd( int fd, int sz )
{
#ifdef linux
    /* 32 bytes comfortably hold "/proc/self/fd/" plus any fd number */
    char *ret = malloc( 32 + sz );

    if (ret) snprintf( ret, 32 + sz, "/proc/self/fd/%u", fd );
    return ret;
#elif defined(F_GETPATH)
    char *ret = malloc( PATH_MAX + sz );

    if (!ret) return NULL;
    if (!fcntl( fd, F_GETPATH, ret )) return ret;
    free( ret );
    return NULL;
#else
    return NULL;
#endif
}
/* recursively build the Unix path of an inode, walking up to the nearest
 * ancestor that has an open directory handle; the result ends in '/' and
 * has sz spare bytes; caller frees; returns NULL on failure */
static char *inode_get_path( struct inode *inode, int sz )
{
    struct list *head;
    char *path;
    int len;

    if (!inode)
        return NULL;

    head = list_head( &inode->dirs );
    if (head)
    {
        /* an open handle gives us a direct path through its fd */
        int unix_fd = get_unix_fd( LIST_ENTRY( head, struct dir, in_entry )->fd );
        if (!(path = get_path_from_fd( unix_fd, sz + 1 ))) return NULL;
        strcat( path, "/" );
        return path;
    }

    if (!inode->name)
        return NULL;

    len = strlen( inode->name );
    path = inode_get_path( inode->parent, sz + len + 1 );
    if (!path)
        return NULL;

    strcat( path, inode->name );
    strcat( path, "/" );

    return path;
}
/* a directory appeared below a watched subtree: start watching it too */
static void inode_check_dir( struct inode *parent, const char *name )
{
    char *path;
    unsigned int filter;
    struct inode *inode;
    struct stat st;
    int wd = -1;

    path = inode_get_path( parent, strlen(name) );
    if (!path)
        return;

    strcat( path, name );

    if (stat( path, &st ) < 0)
        goto end;

    filter = filter_from_inode( parent, 1 );
    if (!filter)
        goto end;  /* nobody watches subtrees here */

    inode = inode_add( parent, st.st_dev, st.st_ino, name );
    if (!inode || inode->wd != -1)
        goto end;  /* OOM, or already being watched */

    wd = inotify_add_dir( path, filter );
    if (wd != -1)
        inode_set_wd( inode, wd );
    else
        free_inode( inode );

end:
    free( path );
}
  678. static int prepend( char **path, const char *segment )
  679. {
  680. int extra;
  681. char *p;
  682. extra = strlen( segment ) + 1;
  683. if (*path)
  684. {
  685. int len = strlen( *path ) + 1;
  686. p = realloc( *path, len + extra );
  687. if (!p) return 0;
  688. memmove( &p[ extra ], p, len );
  689. p[ extra - 1 ] = '/';
  690. memcpy( p, segment, extra - 1 );
  691. }
  692. else
  693. {
  694. p = malloc( extra );
  695. if (!p) return 0;
  696. memcpy( p, segment, extra );
  697. }
  698. *path = p;
  699. return 1;
  700. }
/* deliver one inotify event to every watch interested in it */
static void inotify_notify_all( struct inotify_event *ie )
{
    unsigned int filter, action;
    struct inode *inode, *i;
    char *path = NULL;
    struct dir *dir;

    inode = inode_from_wd( ie->wd );
    if (!inode)
    {
        fprintf( stderr, "no inode matches %d\n", ie->wd);
        return;
    }

    filter = filter_from_event( ie );

    if (ie->mask & IN_CREATE)
    {
        /* new subdirectories must be added to recursive watches */
        if (ie->mask & IN_ISDIR)
            inode_check_dir( inode, ie->name );

        action = FILE_ACTION_ADDED;
    }
    else if (ie->mask & IN_DELETE)
        action = FILE_ACTION_REMOVED;
    else if (ie->mask & IN_MOVED_FROM)
        action = FILE_ACTION_RENAMED_OLD_NAME;
    else if (ie->mask & IN_MOVED_TO)
        action = FILE_ACTION_RENAMED_NEW_NAME;
    else
        action = FILE_ACTION_MODIFIED;

    /*
     * Work our way up the inode hierarchy
     *  extending the relative path as we go
     *  and notifying all recursive watches.
     */
    if (!prepend( &path, ie->name ))
        return;

    for (i = inode; i; i = i->parent)
    {
        LIST_FOR_EACH_ENTRY( dir, &i->dirs, struct dir, in_entry )
            if ((filter & dir->filter) && (i==inode || dir->subtree))
                inotify_do_change_notify( dir, action, ie->cookie, path );

        if (!i->name || !prepend( &path, i->name ))
            break;
    }
    free( path );

    if (ie->mask & IN_DELETE)
    {
        /* the deleted entry's inode (if tracked) is no longer needed */
        i = inode_from_name( inode, ie->name );
        if (i)
            free_inode( i );
    }
}
  751. static void inotify_poll_event( struct fd *fd, int event )
  752. {
  753. int r, ofs, unix_fd;
  754. char buffer[0x1000];
  755. struct inotify_event *ie;
  756. unix_fd = get_unix_fd( fd );
  757. r = read( unix_fd, buffer, sizeof buffer );
  758. if (r < 0)
  759. {
  760. fprintf(stderr,"inotify_poll_event(): inotify read failed!\n");
  761. return;
  762. }
  763. for( ofs = 0; ofs < r - offsetof(struct inotify_event, name); )
  764. {
  765. ie = (struct inotify_event*) &buffer[ofs];
  766. ofs += offsetof( struct inotify_event, name[ie->len] );
  767. if (ofs > r) break;
  768. if (ie->len) inotify_notify_all( ie );
  769. }
  770. }
/* create the server-wide inotify fd object; NULL when inotify is unavailable */
static inline struct fd *create_inotify_fd( void )
{
    int unix_fd;

    unix_fd = inotify_init();
    if (unix_fd<0)
        return NULL;
    return create_anonymous_fd( &inotify_fd_ops, unix_fd, NULL, 0 );
}
  779. static int map_flags( unsigned int filter )
  780. {
  781. unsigned int mask;
  782. /* always watch these so we can track subdirectories in recursive watches */
  783. mask = (IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF);
  784. if (filter & FILE_NOTIFY_CHANGE_ATTRIBUTES)
  785. mask |= IN_ATTRIB;
  786. if (filter & FILE_NOTIFY_CHANGE_SIZE)
  787. mask |= IN_MODIFY;
  788. if (filter & FILE_NOTIFY_CHANGE_LAST_WRITE)
  789. mask |= IN_MODIFY;
  790. if (filter & FILE_NOTIFY_CHANGE_LAST_ACCESS)
  791. mask |= IN_MODIFY;
  792. if (filter & FILE_NOTIFY_CHANGE_SECURITY)
  793. mask |= IN_ATTRIB;
  794. return mask;
  795. }
/* add an inotify watch on the given path; returns the watch descriptor or -1 */
static int inotify_add_dir( char *path, unsigned int filter )
{
    int wd = inotify_add_watch( get_unix_fd( inotify_fd ),
                                path, map_flags( filter ) );
    if (wd != -1)
        set_fd_events( inotify_fd, POLLIN );  /* make sure we poll for events */
    return wd;
}
/* lazily set up the inotify fd and hash tables; returns 0 when unavailable */
static int init_inotify( void )
{
    int i;

    if (inotify_fd)
        return 1;  /* already initialized */

    inotify_fd = create_inotify_fd();
    if (!inotify_fd)
        return 0;

    for (i=0; i<HASH_SIZE; i++)
    {
        list_init( &inode_hash[i] );
        list_init( &wd_hash[i] );
    }

    return 1;
}
/* (re)establish the inotify watch for a directory object, binding the dir
 * to its inode on first use; returns 1 on success, 0 on failure (caller
 * falls back to dnotify) */
static int inotify_adjust_changes( struct dir *dir )
{
    unsigned int filter;
    struct inode *inode;
    struct stat st;
    char *path;
    int wd, unix_fd;

    if (!inotify_fd)
        return 0;
    unix_fd = get_unix_fd( dir->fd );
    inode = dir->inode;
    if (!inode)
    {
        /* check if this fd is already being watched */
        if (-1 == fstat( unix_fd, &st ))
            return 0;
        inode = get_inode( st.st_dev, st.st_ino );
        if (!inode)
            inode = create_inode( st.st_dev, st.st_ino );
        if (!inode)
            return 0;
        /* link the dir into the inode's list BEFORE computing the filter,
         * so this dir's own filter is taken into account below */
        list_add_tail( &inode->dirs, &dir->in_entry );
        dir->inode = inode;
    }

    /* presumably combines the filters of all dirs watching this inode —
     * filter_from_inode() is defined elsewhere in the file; confirm there */
    filter = filter_from_inode( inode, 0 );

    if (!(path = get_path_from_fd( unix_fd, 0 ))) return 0;
    wd = inotify_add_dir( path, filter );
    free( path );
    if (wd == -1) return 0;
    /* record the watch descriptor so events can be routed back to the inode */
    inode_set_wd( inode, wd );
    return 1;
}
  851. static char *get_basename( const char *link )
  852. {
  853. char *buffer, *name = NULL;
  854. int r, n = 0x100;
  855. while (1)
  856. {
  857. buffer = malloc( n );
  858. if (!buffer) return NULL;
  859. r = readlink( link, buffer, n );
  860. if (r < 0)
  861. break;
  862. if (r < n)
  863. {
  864. name = buffer;
  865. break;
  866. }
  867. free( buffer );
  868. n *= 2;
  869. }
  870. if (name)
  871. {
  872. while (r > 0 && name[ r - 1 ] == '/' )
  873. r--;
  874. name[ r ] = 0;
  875. name = strrchr( name, '/' );
  876. if (name)
  877. name = strdup( &name[1] );
  878. }
  879. free( buffer );
  880. return name;
  881. }
/* when a new dir object is created, check whether its parent directory is
 * already under a recursive watch; if so, add this directory to the watch
 * tree and start watching it too */
static void dir_add_to_existing_notify( struct dir *dir )
{
    struct inode *inode, *parent;
    unsigned int filter = 0;
    struct stat st, st_new;
    char *link, *name;
    int res, wd, unix_fd;

    if (!inotify_fd)
        return;
    unix_fd = get_unix_fd( dir->fd );

    /* check if it's in the list of inodes we want to watch */
    if (fstat( unix_fd, &st_new )) return;
    if ((inode = find_inode( st_new.st_dev, st_new.st_ino ))) return;

    /* lookup the parent */
    /* NOTE(review): the '3' presumably reserves extra bytes for the "/.."
     * appended below — confirm against get_path_from_fd() */
    if (!(link = get_path_from_fd( unix_fd, 3 ))) return;
    strcat( link, "/.." );
    res = stat( link, &st );
    free( link );
    if (res == -1) return;

    /*
     * If there's no parent, stop. We could keep going adding
     * ../ to the path until we hit the root of the tree or
     * find a recursively watched ancestor.
     * Assume it's too expensive to search up the tree for now.
     */
    if (!(parent = find_inode( st.st_dev, st.st_ino ))) return;
    if (parent->wd == -1) return;

    /* filter_from_inode( parent, 1 ): nonzero only if some watcher wants
     * subtree events from the parent — defined elsewhere; confirm there */
    if (!(filter = filter_from_inode( parent, 1 ))) return;

    if (!(link = get_path_from_fd( unix_fd, 0 ))) return;
    if (!(name = get_basename( link )))
    {
        free( link );
        return;
    }
    inode = inode_add( parent, st_new.st_dev, st_new.st_ino, name );
    if (inode)
    {
        /* Couldn't find this inode at the start of the function, must be new */
        assert( inode->wd == -1 );
        wd = inotify_add_dir( link, filter );
        if (wd != -1)
            inode_set_wd( inode, wd );
    }
    free( name );
    free( link );
}
#else

/* inotify not available on this platform: stub out the inotify machinery
 * so read_directory_changes falls back to dnotify_adjust_changes() */

static int init_inotify( void )
{
    return 0;
}

static int inotify_adjust_changes( struct dir *dir )
{
    return 0;
}

static void free_inode( struct inode *inode )
{
    /* inodes are never created without inotify support */
    assert( 0 );
}

static void dir_add_to_existing_notify( struct dir *dir )
{
}

#endif /* HAVE_SYS_INOTIFY_H */
  945. struct object *create_dir_obj( struct fd *fd, unsigned int access, mode_t mode )
  946. {
  947. struct dir *dir;
  948. dir = alloc_object( &dir_ops );
  949. if (!dir)
  950. return NULL;
  951. list_init( &dir->change_records );
  952. dir->filter = 0;
  953. dir->notified = 0;
  954. dir->want_data = 0;
  955. dir->inode = NULL;
  956. grab_object( fd );
  957. dir->fd = fd;
  958. dir->mode = mode;
  959. dir->uid = ~(uid_t)0;
  960. dir->client_process = NULL;
  961. set_fd_user( fd, &dir_fd_ops, &dir->obj );
  962. dir_add_to_existing_notify( dir );
  963. return &dir->obj;
  964. }
/* retrieve (or allocate) the client-side directory cache entry */
DECL_HANDLER(get_directory_cache_entry)
{
    struct dir *dir;
    int *free_entries;
    data_size_t free_size;

    if (!(dir = get_dir_obj( current->process, req->handle, 0 ))) return;

    /* a dir object is cached by at most one process; first caller claims it */
    if (!dir->client_process)
    {
        if ((dir->client_entry = alloc_dir_cache_entry( dir, current->process )) == -1) goto done;
        dir->client_process = (struct process *)grab_object( current->process );
    }

    /* only the owning process may read the entry */
    if (dir->client_process == current->process) reply->entry = dir->client_entry;
    else set_error( STATUS_SHARING_VIOLATION );

done:  /* allow freeing entries even on failure */
    free_size = get_reply_max_size();
    free_entries = get_free_dir_cache_entries( current->process, &free_size );
    if (free_entries) set_reply_data_ptr( free_entries, free_size );
    release_object( dir );
}
/* enable change notifications for a directory */
DECL_HANDLER(read_directory_changes)
{
    struct dir *dir;
    struct async *async;

    if (!req->filter)
    {
        set_error(STATUS_INVALID_PARAMETER);
        return;
    }

    dir = get_dir_obj( current->process, req->async.handle, 0 );
    if (!dir)
        return;

    /* requests don't timeout */
    if (!(async = create_async( dir->fd, current, &req->async, NULL ))) goto end;
    fd_queue_async( dir->fd, async, ASYNC_TYPE_WAIT );

    /* assign it once: filter/subtree/want_data are fixed on the first request
       and kept for all later ones */
    if (!dir->filter)
    {
        init_inotify();
        insert_change( dir );
        dir->filter = req->filter;
        dir->subtree = req->subtree;
        dir->want_data = req->want_data;
    }

    /* if there's already a change in the queue, send it */
    if (!list_empty( &dir->change_records ))
        fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );

    /* setup the real notification: prefer inotify, fall back to dnotify */
    if (!inotify_adjust_changes( dir ))
        dnotify_adjust_changes( dir );

    set_error(STATUS_PENDING);
    release_object( async );
end:
    release_object( dir );
}
/* return all queued change records for a directory in one reply buffer */
DECL_HANDLER(read_change)
{
    struct change_record *record, *next;
    struct dir *dir;
    struct list events;
    char *data, *event;
    int size = 0;

    dir = get_dir_obj( current->process, req->handle, 0 );
    if (!dir)
        return;

    /* steal the whole queue up front; the records are freed at the end
       even if building the reply fails */
    list_init( &events );
    list_move_tail( &events, &dir->change_records );
    release_object( dir );

    if (list_empty( &events ))
    {
        set_error( STATUS_NO_DATA_DETECTED );
        return;
    }

    /* first pass: total reply size, each event padded to an int boundary */
    LIST_FOR_EACH_ENTRY( record, &events, struct change_record, entry )
    {
        size += (offsetof(struct filesystem_event, name[record->event.len])
                + sizeof(int)-1) / sizeof(int) * sizeof(int);
    }

    if (size > get_reply_max_size())
        set_error( STATUS_BUFFER_TOO_SMALL );
    else if ((data = mem_alloc( size )) != NULL)
    {
        /* second pass: serialize the events into the reply buffer */
        event = data;
        LIST_FOR_EACH_ENTRY( record, &events, struct change_record, entry )
        {
            data_size_t len = offsetof( struct filesystem_event, name[record->event.len] );

            /* FIXME: rename events are sometimes reported as delete/create */
            if (record->event.action == FILE_ACTION_RENAMED_OLD_NAME)
            {
                struct list *elem = list_next( &events, &record->entry );
                if (elem)
                    next = LIST_ENTRY(elem, struct change_record, entry);

                /* if the next record is the matching NEW_NAME half (same
                   cookie), keep the rename pair and clear the cookie so the
                   NEW_NAME half isn't rewritten below; otherwise the OLD_NAME
                   half stands alone and is downgraded to a plain removal */
                if (elem && next->cookie == record->cookie)
                    next->cookie = 0;
                else
                    record->event.action = FILE_ACTION_REMOVED;
            }
            else if (record->event.action == FILE_ACTION_RENAMED_NEW_NAME && record->cookie)
                record->event.action = FILE_ACTION_ADDED;  /* unpaired NEW_NAME */

            memcpy( event, &record->event, len );
            event += len;
            if (len % sizeof(int))
            {
                /* zero the padding up to the next int boundary */
                memset( event, 0, sizeof(int) - len % sizeof(int) );
                event += sizeof(int) - len % sizeof(int);
            }
        }
        set_reply_data_ptr( data, size );
    }

    /* always free the consumed records, even on error paths */
    LIST_FOR_EACH_ENTRY_SAFE( record, next, &events, struct change_record, entry )
    {
        list_remove( &record->entry );
        free( record );
    }
}