/*	$OpenBSD: vfs_lockf.c,v 1.20 2015/03/14 03:38:51 jsg Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/unistd.h>

struct pool lockfpool;

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#define SELF	0x1
#define OTHERS	0x2

#ifdef LOCKF_DEBUG

#define	DEBUG_SETLOCK		0x01
#define	DEBUG_CLEARLOCK		0x02
#define	DEBUG_GETLOCK		0x04
#define	DEBUG_FINDOVR		0x08
#define	DEBUG_SPLIT		0x10
#define	DEBUG_WAKELOCK		0x20

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

#define	DPRINTF(args, level)	if (lockf_debug & (level)) printf args
#else
#define	DPRINTF(args, level)
#endif

void
lf_init(void)
{
	pool_init(&lockfpool, sizeof(struct lockf), 0, 0, PR_WAITOK,
	    "lockfpl", NULL);
}

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);
/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

/*
 * 3 options for allowfail.
 * 0 - always allocate.  1 - cut off at limit.  2 - cut off at double limit.
 */
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2)))
		return (NULL);
	uip->ui_lockcnt++;
	lock = pool_get(&lockfpool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}
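
/*
 * Illustrative note (editorial, not in the original sources): how the
 * allowfail cutoffs in lf_alloc() work out for maxlocksperuid = 1024.
 *
 *	allowfail == 0 (lock splits):	never refused
 *	allowfail == 1 (F_SETLK):	refused once ui_lockcnt > 1024
 *	allowfail == 2 (unlock/getlk):	refused once ui_lockcnt > 2048
 *	uid == 0 (root):		never refused, whatever the count
 */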
void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	pool_put(&lockfpool, lock);
}

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct lockf **head, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
	struct proc *p = curproc;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0) {
		end = -1;
	} else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
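
	/*
	 * Worked example (editorial): on a 100-byte file, a request with
	 * l_whence == SEEK_END, l_start == -10 and l_len == 4 yields
	 * start = 100 + (-10) = 90 and end = 90 + 4 - 1 = 93.  A zero
	 * l_len means "to end of file" and is encoded as end = -1.
	 */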
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == NULL) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2);
	if (!lock)
		return (ENOLCK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = NULL;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = flags;
	lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1;

	switch (op) {
	case F_SETLK:
		return (lf_setlock(lock));
	case F_UNLCK:
		error = lf_clearlock(lock);
		lf_free(lock);
		return (error);
	case F_GETLK:
		error = lf_getlock(lock, fl);
		lf_free(lock);
		return (error);
	default:
		lf_free(lock);
		return (EINVAL);
	}
	/* NOTREACHED */
}
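
/*
 * Editorial sketch, not part of the original file: the userland side of
 * the three operations dispatched above.  fcntl(2) requests reach
 * lf_advlock() through the file system's VOP_ADVLOCK.  Guarded with
 * "#if 0" so this file still compiles as kernel code.
 */
#if 0
#include <fcntl.h>

int
lock_first_bytes(int fd)
{
	struct flock fl;

	fl.l_type = F_WRLCK;	/* exclusive lock... */
	fl.l_whence = SEEK_SET;	/* ...measured from the file start... */
	fl.l_start = 0;
	fl.l_len = 16;		/* ...covering bytes 0-15 */

	/* F_SETLK fails with EAGAIN rather than sleeping (no F_WAIT). */
	if (fcntl(fd, F_SETLK, &fl) == -1)
		return (-1);

	fl.l_type = F_UNLCK;	/* release the same range */
	return (fcntl(fd, F_SETLK, &fl));
}
#endif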

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return (EAGAIN);
		}
		/*
		 * We are blocked.  Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us.  MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					lf_free(lock);
					return (EDEADLK);
				}
			}
		}
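
		/*
		 * Worked example (editorial): process A holds bytes 0-9
		 * and sleeps waiting for 10-19; process B holds 10-19 and
		 * now requests 0-9.  The walk above starts at A (the owner
		 * of our blocker), follows A's wait channel to its pending
		 * request, steps to the lock blocking that request, and
		 * finds its owner is B, i.e. ourselves, so it returns
		 * EDEADLK instead of sleeping forever.
		 */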
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void)lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_SETLOCK) {
			lf_print("lf_setlock", lock);
			lf_print("lf_setlock: blocking on", block);
		}
#endif /* LOCKF_DEBUG */
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		error = tsleep(lock, priority, lockstr, 0);
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			lf_free(lock);
			return (error);
		}
	}

	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;
		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;
		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;
		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;
		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK) {
		lf_print("lf_setlock: got the lock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
	struct lockf **head = lock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NULL)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_CLEARLOCK)
		lf_print("lf_clearlock", lock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, lock, SELF, &prev, &overlap))) {
		lf_wakelock(overlap);

		switch (ovcase) {
		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;
		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			overlap->lf_next = lock->lf_next;
			break;
		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;
		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}
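
/*
 * Worked example (editorial): with a lock held on bytes 0-99, unlocking
 * bytes 40-59 is case 2 above.  lf_split() carves the holder into 0-39
 * and 60-99, and lf_wakelock() has already woken any sleepers so they
 * can retry against the shrunken ranges.
 */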

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_CLEARLOCK)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_pid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf;

	prev = lock->lf_head;
	lf = *prev;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NULL);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

#ifdef LOCKF_DEBUG
	if (lf && lockf_debug & DEBUG_FINDOVR)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_FINDOVR)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
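
		/*
		 * Editorial diagram of the cases below, where "lock" is
		 * the incoming request and "lf" the existing lock being
		 * tested (lf_end == -1 means "to end of file"):
		 *
		 *	1: lf    |----|		2: lf    |--------|
		 *	   lock  |----|		   lock    |----|
		 *
		 *	3: lf      |--|		4: lf    |----|
		 *	   lock  |------|	   lock     |----|
		 *
		 *	5: lf       |----|
		 *	   lock  |----|
		 */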
		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) && (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
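/*
 * Worked example (editorial): splitting holder lock1 = [0,99] around the
 * contained region lock2 = [40,59] leaves lock1 = [0,39], allocates a new
 * lock for the tail [60,99], and links the pieces as 0-39, 40-59, 60-99.
 * When lock2 shares a start or an end with lock1, only two pieces result.
 */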
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SPLIT) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = lf_alloc(lock1->lf_uid, 0);
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block.tqe_next = NULL;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
void
lf_wakelock(struct lockf *lock)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
		wakeup_one(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(char *tag, struct lockf *lock)
{
	struct lockf *block;

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id %p", lock->lf_id);
	printf(" %s, start %llx, end %llx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(" block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id %p", lf->lf_id);
		printf(" %s, start %llx, end %llx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */