flock.c

/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include "internal.h"

#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1

static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static struct workqueue_struct *afs_lock_manager;
static DEFINE_MUTEX(afs_lock_manager_mutex);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};
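
/*
 * Per-lock state lives in fl->fl_u.afs: a list link that threads the lock
 * onto the vnode's pending_locks/granted_locks queues plus a state word
 * (AFS_LOCK_PENDING, AFS_LOCK_GRANTED or a negative error).  The two
 * operations above keep that state attached to whichever file_lock copy
 * the VFS is currently holding.
 */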

/*
 * initialise the lock manager thread if it isn't already running
 */
static int afs_init_lock_manager(void)
{
	int ret;

	ret = 0;
	if (!afs_lock_manager) {
		mutex_lock(&afs_lock_manager_mutex);
		if (!afs_lock_manager) {
			afs_lock_manager = alloc_workqueue("kafs_lockd",
							   WQ_MEM_RECLAIM, 0);
			if (!afs_lock_manager)
				ret = -ENOMEM;
		}
		mutex_unlock(&afs_lock_manager_mutex);
	}
	return ret;
}

/*
 * destroy the lock manager thread if it's running
 */
void __exit afs_kill_lock_manager(void)
{
	if (afs_lock_manager)
		destroy_workqueue(afs_lock_manager);
}

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension in a bit less than that time
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	queue_delayed_work(afs_lock_manager, &vnode->lock_work,
			   AFS_LOCKWAIT * HZ / 2);
}
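
/*
 * AFS_LOCKWAIT is taken here to be the server-side lock lifetime in seconds
 * (the five minutes mentioned above), so requeueing the work item at half
 * that interval should renew the lock with a comfortable margin to spare.
 */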

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
{
	struct file_lock *p, *_p;

	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
	if (fl->fl_type == F_RDLCK) {
		list_for_each_entry_safe(p, _p, &vnode->pending_locks,
					 fl_u.afs.link) {
			if (p->fl_type == F_RDLCK) {
				p->fl_u.afs.state = AFS_LOCK_GRANTED;
				list_move_tail(&p->fl_u.afs.link,
					       &vnode->granted_locks);
				wake_up(&p->fl_wait);
			}
		}
	}
}

/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct file_lock *fl;
	afs_lock_type_t type;
	struct key *key;
	int ret;

	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

	if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) {
		_debug("unlock");
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll time out anyway */
		ret = afs_vnode_release_lock(vnode, vnode->unlock_key);
		if (ret < 0)
			printk(KERN_WARNING "AFS:"
			       " Failed to release lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);

		spin_lock(&vnode->lock);
		key_put(vnode->unlock_key);
		vnode->unlock_key = NULL;
		clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags);
	}

	/* if we've got a lock, then it must be time to extend that lock as AFS
	 * locks time out after 5 minutes */
	if (!list_empty(&vnode->granted_locks)) {
		_debug("extend");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->granted_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_extend_lock(vnode, key);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		key_put(key);
		switch (ret) {
		case 0:
			afs_schedule_lock_extension(vnode);
			break;
		default:
			/* ummm... we failed to extend the lock - retry
			 * extension shortly */
			printk(KERN_WARNING "AFS:"
			       " Failed to extend lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
			break;
		}
		_leave(" [extend]");
		return;
	}

	/* if we don't have a granted lock, then we must've been called back by
	 * the server, and so it might be possible to get a lock we're
	 * currently waiting for */
	if (!list_empty(&vnode->pending_locks)) {
		_debug("get");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->pending_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		type = (fl->fl_type == F_RDLCK) ?
			AFS_LOCK_READ : AFS_LOCK_WRITE;
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case -EWOULDBLOCK:
			_debug("blocked");
			break;
		case 0:
			_debug("acquired");
			if (type == AFS_LOCK_READ)
				set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
			else
				set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
			ret = AFS_LOCK_GRANTED;
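			/* fall through: a successful grant is handed off to
			 * the same pending-queue processing as an error */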
		default:
			spin_lock(&vnode->lock);
			/* the pending lock may have been withdrawn due to a
			 * signal */
			if (list_entry(vnode->pending_locks.next,
				       struct file_lock, fl_u.afs.link) == fl) {
				fl->fl_u.afs.state = ret;
				if (ret == AFS_LOCK_GRANTED)
					afs_grant_locks(vnode, fl);
				else
					list_del_init(&fl->fl_u.afs.link);
				wake_up(&fl->fl_wait);
				spin_unlock(&vnode->lock);
			} else {
				_debug("withdrawn");
				clear_bit(AFS_VNODE_READLOCKED, &vnode->flags);
				clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
				spin_unlock(&vnode->lock);
				afs_vnode_release_lock(vnode, key);
				if (!list_empty(&vnode->pending_locks))
					afs_lock_may_be_available(vnode);
			}
			break;
		}
		key_put(key);
		_leave(" [pend]");
		return;
	}

	/* looks like the lock request was withdrawn on a signal */
	spin_unlock(&vnode->lock);
	_leave(" [no locks]");
}

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
{
	cancel_delayed_work(&vnode->lock_work);
	if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) &&
	    !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
		BUG();
	if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags))
		BUG();
	vnode->unlock_key = key_get(key);
	afs_lock_may_be_available(vnode);
}

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	afs_lock_type_t type;
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file locks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	ret = afs_init_lock_manager();
	if (ret < 0)
		return ret;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;

	spin_lock(&inode->i_lock);

	/* make sure we've got a callback on this file and that our view of the
	 * data version is up to date */
	ret = afs_vnode_fetch_status(vnode, NULL, key);
	if (ret < 0)
		goto error;

	if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		goto error;
	}

	spin_lock(&vnode->lock);

	/* if we've already got a readlock on the server then we can instantly
	 * grant another readlock, irrespective of whether there are any
	 * pending writelocks */
	if (type == AFS_LOCK_READ &&
	    vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
		_debug("instant readlock");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		ASSERT(!list_empty(&vnode->granted_locks));
		goto sharing_existing_lock;
	}

	/* if there's no-one else with a lock on this vnode, then we need to
	 * ask the server for a lock */
	if (list_empty(&vnode->pending_locks) &&
	    list_empty(&vnode->granted_locks)) {
		_debug("not locked");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_READLOCKED) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
		set_bit(AFS_VNODE_LOCKING, &vnode->flags);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case 0:
			_debug("acquired");
			goto acquired_server_lock;
		case -EWOULDBLOCK:
			_debug("would block");
			spin_lock(&vnode->lock);
			ASSERT(list_empty(&vnode->granted_locks));
			ASSERTCMP(vnode->pending_locks.next, ==,
				  &fl->fl_u.afs.link);
			goto wait;
		default:
			spin_lock(&vnode->lock);
			list_del_init(&fl->fl_u.afs.link);
			spin_unlock(&vnode->lock);
			goto error;
		}
	}

	/* otherwise, we need to wait for a local lock to become available */
	_debug("wait local");
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
wait:
	if (!(fl->fl_flags & FL_SLEEP)) {
		_debug("noblock");
		ret = -EAGAIN;
		goto abort_attempt;
	}
	spin_unlock(&vnode->lock);

	/* now we need to sleep and wait for the lock manager thread to get the
	 * lock from the server */
	_debug("sleep");
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state <= AFS_LOCK_GRANTED);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0)
			goto error;
		spin_lock(&vnode->lock);
		goto given_lock;
	}

	/* we were interrupted, but someone may still be in the throes of
	 * giving us the lock */
	_debug("intr");
	ASSERTCMP(ret, ==, -ERESTARTSYS);

	spin_lock(&vnode->lock);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0) {
			spin_unlock(&vnode->lock);
			goto error;
		}
		goto given_lock;
	}

abort_attempt:
	/* we aren't going to get the lock, either because we're unwilling to
	 * wait, or because some signal happened */
	_debug("abort");
	if (list_empty(&vnode->granted_locks) &&
	    vnode->pending_locks.next == &fl->fl_u.afs.link) {
		if (vnode->pending_locks.prev != &fl->fl_u.afs.link) {
			/* kick the next pending lock into having a go */
			list_del_init(&fl->fl_u.afs.link);
			afs_lock_may_be_available(vnode);
		}
	} else {
		list_del_init(&fl->fl_u.afs.link);
	}
	spin_unlock(&vnode->lock);
	goto error;

acquired_server_lock:
	/* we've acquired a server lock, but it needs to be renewed after 5
	 * mins */
	spin_lock(&vnode->lock);
	afs_schedule_lock_extension(vnode);
	if (type == AFS_LOCK_READ)
		set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
	else
		set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
sharing_existing_lock:
	/* the lock has been granted as far as we're concerned... */
	fl->fl_u.afs.state = AFS_LOCK_GRANTED;
	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
given_lock:
	/* ... but we do still need to get the VFS's blessing */
	ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING)));
	ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) |
				(1 << AFS_VNODE_WRITELOCKED))) != 0);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0)
		goto vfs_rejected_lock;
	spin_unlock(&vnode->lock);

	/* again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere) */
	afs_vnode_fetch_status(vnode, NULL, key);

error:
	spin_unlock(&inode->i_lock);
	_leave(" = %d", ret);
	return ret;

vfs_rejected_lock:
	/* the VFS rejected the lock we just obtained, so we have to discard
	 * what we just got */
	_debug("vfs refused %d", ret);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	goto abort_attempt;
}

/*
 * unlock on a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file unlocks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	spin_lock(&vnode->lock);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0) {
		spin_unlock(&vnode->lock);
		_leave(" = %d [vfs]", ret);
		return ret;
	}

	/* discard the server lock only if all granted locks are gone */
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	spin_unlock(&vnode->lock);
	_leave(" = 0");
	return 0;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret, lock_count;

	_enter("");

	fl->fl_type = F_UNLCK;

	inode_lock(&vnode->vfs_inode);

	/* check local lock records first */
	ret = 0;
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_vnode_fetch_status(vnode, NULL, key);
		if (ret < 0)
			goto error;
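
		/* lock_count as returned by the fileserver is assumed to be
		 * positive for the number of read locks held and negative
		 * when the file is write-locked, hence the sign test below */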
		lock_count = vnode->status.lock_count;
		if (lock_count) {
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
		}
	}

error:
	inode_unlock(&vnode->vfs_inode);
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}

/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));

	_enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);
	if (fl->fl_type == F_UNLCK)
		return afs_do_unlk(file, fl);
	return afs_do_setlk(file, fl);
}

/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));

	_enter("{%x:%u},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/* we're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		return afs_do_unlk(file, fl);
	return afs_do_setlk(file, fl);
}
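
/*
 * The two entry points above are expected to be wired up as the ->lock and
 * ->flock methods in the AFS file operations, so that fcntl() byte-range
 * locks and flock() locks both reach this code.
 */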

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	_enter("");

	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	_enter("");

	list_del_init(&fl->fl_u.afs.link);
}