/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};
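
/*
 * Only ->readlock, ->read_delay, ->readunlock, and ->init may be NULL;
 * when ->readlock is NULL no reader kthreads are created for that lock
 * type (see the NULL checks in lock_torture_init() and
 * lock_torture_stats_print()).  The writer path invokes ->task_boost,
 * ->writelock, ->write_delay, and ->writeunlock unconditionally, so
 * every ops structure must supply them.
 */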

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};

static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock = torture_lock_busted_write_lock,
	.write_delay = torture_lock_busted_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_lock_busted_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "lock_busted"
};
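
/*
 * The "lock" above intentionally provides no mutual exclusion at all.
 * A run with torture_type=lock_busted should therefore rack up
 * n_lock_fail counts and end with "End of test: FAILURE", which makes
 * it a convenient check that the failure-detection machinery itself
 * works.
 */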

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock = torture_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_spin_lock_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock"
};
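
/*
 * The _irq variants below stash the saved interrupt flags in
 * cxt.cur_ops->flags.  Sharing one flags word among all writers is safe
 * only because it is written immediately after the lock is acquired and
 * read just before it is released, so it is never touched without the
 * lock held.
 */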
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock = torture_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_lock_spin_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock = torture_rwlock_write_lock,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwlock_write_unlock,
	.readlock = torture_rwlock_read_lock,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock,
	.name = "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock = torture_rwlock_write_lock_irq,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwlock_write_unlock_irq,
	.readlock = torture_rwlock_read_lock_irq,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock_irq,
	.name = "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock = torture_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "mutex_lock"
};

#include <linux/ww_mutex.h>
static DEFINE_WD_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
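
/*
 * Acquire all three ww_mutexes under one acquire context.  On -EDEADLK
 * (this context lost the wound/wait arbitration), drop everything
 * already held, take the contended lock with ww_mutex_lock_slow(),
 * move it to the head of the local list, and retry the rest in the new
 * order.  This is the standard ww_mutex backoff protocol; see the
 * ww_mutex design document under Documentation/ for background.
 */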
static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock = torture_ww_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_ww_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock = torture_rtmutex_lock,
	.write_delay = torture_rtmutex_delay,
	.task_boost = torture_rtmutex_boost,
	.writeunlock = torture_rtmutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock = torture_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwsem_up_write,
	.readlock = torture_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_rwsem_up_read,
	.name = "rwsem_lock"
};

static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init = torture_percpu_rwsem_init,
	.writelock = torture_percpu_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_percpu_rwsem_up_write,
	.readlock = torture_percpu_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_percpu_rwsem_up_read,
	.name = "percpu_rwsem_lock"
};
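
/*
 * Note that percpu_rwsem_lock_ops reuses the plain rwsem delay
 * callbacks: the reader/writer semantics are identical, and only the
 * underlying lock implementation differs.
 */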

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
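/*
 * The check works by publishing lock_is_write_held while inside the
 * critical section: if the lock under test really is exclusive, no
 * writer can ever observe the flag already set (nor observe
 * lock_is_read_held), so any WARN_ON_ONCE()/n_lock_fail hit below
 * indicates a broken lock.
 */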
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
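/*
 * A "???" in the resulting line flags a suspicious imbalance: the
 * busiest thread acquired the lock more than twice as often as the
 * least busy one.  A "!!!" means at least one acquisition failed the
 * exclusion check.
 */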
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d\n",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d\n",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]" : "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module. As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};
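	/*
	 * To add a new lock type: define a lock_torture_ops structure
	 * above with the appropriate callbacks and list it here; its
	 * ->name then becomes a valid torture_type value.
	 */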

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 && nreaders_stress == 0) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = 0;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			lock_is_read_held = 0;
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose
	 * the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}

	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);