locktorture.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816
/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@us.ibm.com>
 * Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

/* Thread counts; -1 lets lock_torture_init() choose a CPU-count default. */
torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
/* CPU-hotplug torture knobs; 0 disables hotplug testing. */
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

/* Name of the lock type under test; matched against ops->name at init. */
static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

/* Statistics kthread plus the arrays of writer and reader kthreads. */
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

/*
 * Set while inside a write/read critical section; the kthreads WARN and
 * count a failure when they observe a conflicting holder.
 */
static bool lock_is_write_held;
static bool lock_is_read_held;

/* Per-kthread statistics, aggregated by __torture_print_stats(). */
struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Start the test automatically when loaded as a module. */
#if defined(MODULE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");
/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);		/* Optional one-time setup. */
	int (*writelock)(void);		/* Acquire lock for writing. */
	void (*write_delay)(struct torture_random_state *trsp);
	void (*writeunlock)(void);	/* Release write-side lock. */
	int (*readlock)(void);		/* NULL for exclusive-only locks. */
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);
	unsigned long flags;	/* Saved IRQ flags; shared by all holders. */
	const char *name;	/* Matched against the torture_type param. */
};

/*
 * Global test context: effective (post-default) thread counts, error
 * counter, selected ops vector, and the per-thread statistics arrays.
 */
struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

/* "Acquire": intentionally does nothing, so no mutual exclusion. */
static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

/*
 * Deliberately broken "lock", used to verify that the torture test's
 * conflicting-holder detection actually reports failures.
 */
static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};
static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

/* Exclusive-only spinlock test: no read-side ops. */
static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	/*
	 * NOTE(review): the saved IRQ flags live in the single shared
	 * cxt.cur_ops->flags field, so with multiple writers one thread
	 * can restore another's flags word.  Benign only if all writers
	 * run with identical IRQ state -- worth confirming.
	 */
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

/* IRQ-disabling spinlock variant; reuses the plain spinlock's delay. */
static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};
static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	/* Readers hold the lock a bit longer (10us vs. 2us short delay). */
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

/* Reader-writer spinlock test: exercises both lock sides. */
static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};
static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	/* Saved flags shared among holders -- same caveat as spin_lock_irq. */
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	/* Saved flags shared among holders -- same caveat as spin_lock_irq. */
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

/* IRQ-disabling reader-writer lock variant; reuses rwlock delays. */
static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};
static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);	/* rare 500ms stall */
	else
		mdelay(longdelay_ms / 5);	/* usual 20ms hold */
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

/* Sleeping-mutex test: exclusive only, no read side. */
static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};
static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);	/* rare 1s stall */
	else
		mdelay(longdelay_ms / 10);	/* usual 10ms hold */
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);	/* rare 200ms stall */
	else
		mdelay(longdelay_ms / 2);	/* usual 50ms hold */
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

/* Reader-writer semaphore test: sleeping lock with both sides. */
static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};
  371. /*
  372. * Lock torture writer kthread. Repeatedly acquires and releases
  373. * the lock, checking for duplicate acquisitions.
  374. */
  375. static int lock_torture_writer(void *arg)
  376. {
  377. struct lock_stress_stats *lwsp = arg;
  378. static DEFINE_TORTURE_RANDOM(rand);
  379. VERBOSE_TOROUT_STRING("lock_torture_writer task started");
  380. set_user_nice(current, MAX_NICE);
  381. do {
  382. if ((torture_random(&rand) & 0xfffff) == 0)
  383. schedule_timeout_uninterruptible(1);
  384. cxt.cur_ops->writelock();
  385. if (WARN_ON_ONCE(lock_is_write_held))
  386. lwsp->n_lock_fail++;
  387. lock_is_write_held = 1;
  388. if (WARN_ON_ONCE(lock_is_read_held))
  389. lwsp->n_lock_fail++; /* rare, but... */
  390. lwsp->n_lock_acquired++;
  391. cxt.cur_ops->write_delay(&rand);
  392. lock_is_write_held = 0;
  393. cxt.cur_ops->writeunlock();
  394. stutter_wait("lock_torture_writer");
  395. } while (!torture_must_stop());
  396. torture_kthread_stopping("lock_torture_writer");
  397. return 0;
  398. }
  399. /*
  400. * Lock torture reader kthread. Repeatedly acquires and releases
  401. * the reader lock.
  402. */
  403. static int lock_torture_reader(void *arg)
  404. {
  405. struct lock_stress_stats *lrsp = arg;
  406. static DEFINE_TORTURE_RANDOM(rand);
  407. VERBOSE_TOROUT_STRING("lock_torture_reader task started");
  408. set_user_nice(current, MAX_NICE);
  409. do {
  410. if ((torture_random(&rand) & 0xfffff) == 0)
  411. schedule_timeout_uninterruptible(1);
  412. cxt.cur_ops->readlock();
  413. lock_is_read_held = 1;
  414. if (WARN_ON_ONCE(lock_is_write_held))
  415. lrsp->n_lock_fail++; /* rare, but... */
  416. lrsp->n_lock_acquired++;
  417. cxt.cur_ops->read_delay(&rand);
  418. lock_is_read_held = 0;
  419. cxt.cur_ops->readunlock();
  420. stutter_wait("lock_torture_reader");
  421. } while (!torture_must_stop());
  422. torture_kthread_stopping("lock_torture_reader");
  423. return 0;
  424. }
  425. /*
  426. * Create an lock-torture-statistics message in the specified buffer.
  427. */
  428. static void __torture_print_stats(char *page,
  429. struct lock_stress_stats *statp, bool write)
  430. {
  431. bool fail = 0;
  432. int i, n_stress;
  433. long max = 0;
  434. long min = statp[0].n_lock_acquired;
  435. long long sum = 0;
  436. n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
  437. for (i = 0; i < n_stress; i++) {
  438. if (statp[i].n_lock_fail)
  439. fail = true;
  440. sum += statp[i].n_lock_acquired;
  441. if (max < statp[i].n_lock_fail)
  442. max = statp[i].n_lock_fail;
  443. if (min > statp[i].n_lock_fail)
  444. min = statp[i].n_lock_fail;
  445. }
  446. page += sprintf(page,
  447. "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
  448. write ? "Writes" : "Reads ",
  449. sum, max, min, max / 2 > min ? "???" : "",
  450. fail, fail ? "!!!" : "");
  451. if (fail)
  452. atomic_inc(&cxt.n_lock_torture_errors);
  453. }
  454. /*
  455. * Print torture statistics. Caller must ensure that there is only one
  456. * call to this function at a given time!!! This is normally accomplished
  457. * by relying on the module system to only have one copy of the module
  458. * loaded, and then by giving the lock_torture_stats kthread full control
  459. * (or the init/cleanup functions when lock_torture_stats thread is not
  460. * running).
  461. */
  462. static void lock_torture_stats_print(void)
  463. {
  464. int size = cxt.nrealwriters_stress * 200 + 8192;
  465. char *buf;
  466. if (cxt.cur_ops->readlock)
  467. size += cxt.nrealreaders_stress * 200 + 8192;
  468. buf = kmalloc(size, GFP_KERNEL);
  469. if (!buf) {
  470. pr_err("lock_torture_stats_print: Out of memory, need: %d",
  471. size);
  472. return;
  473. }
  474. __torture_print_stats(buf, cxt.lwsa, true);
  475. pr_alert("%s", buf);
  476. kfree(buf);
  477. if (cxt.cur_ops->readlock) {
  478. buf = kmalloc(size, GFP_KERNEL);
  479. if (!buf) {
  480. pr_err("lock_torture_stats_print: Out of memory, need: %d",
  481. size);
  482. return;
  483. }
  484. __torture_print_stats(buf, cxt.lrsa, false);
  485. pr_alert("%s", buf);
  486. kfree(buf);
  487. }
  488. }
  489. /*
  490. * Periodically prints torture statistics, if periodic statistics printing
  491. * was specified via the stat_interval module parameter.
  492. *
  493. * No need to worry about fullstop here, since this one doesn't reference
  494. * volatile state or register callbacks.
  495. */
  496. static int lock_torture_stats(void *arg)
  497. {
  498. VERBOSE_TOROUT_STRING("lock_torture_stats task started");
  499. do {
  500. schedule_timeout_interruptible(stat_interval * HZ);
  501. lock_torture_stats_print();
  502. torture_shutdown_absorb("lock_torture_stats");
  503. } while (!torture_must_stop());
  504. torture_kthread_stopping("lock_torture_stats");
  505. return 0;
  506. }
/*
 * Dump the current module-parameter values, prefixed with @tag
 * ("Start of test", "End of test: SUCCESS", etc.).
 * NOTE(review): the cur_ops parameter is currently unused here.
 */
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}
/*
 * Stop all kthreads, print final statistics, and report overall
 * success or failure.  Also runs on the init-failure path and as the
 * shutdown handler, so it must tolerate partially-initialized state
 * (NULL task arrays are simply skipped).
 */
static void lock_torture_cleanup(void)
{
	int i;

	/* torture_cleanup_begin() returns nonzero if already cleaning up. */
	if (torture_cleanup_begin())
		return;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");
	torture_cleanup_end();
}
/*
 * Module initialization: select the lock type named by torture_type,
 * size the writer/reader kthread pools, allocate per-thread statistics,
 * register the torture-framework services (hotplug, shuffle, shutdown,
 * stutter), and spawn all kthreads.  Any failure after torture_init_begin()
 * unwinds via lock_torture_cleanup().
 */
static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		/* Unknown torture_type: list the valid names and bail out. */
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		torture_init_end();
		return -EINVAL;
	}
	if (cxt.cur_ops->init)
		cxt.cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	/* Default writer count: two per online CPU. */
	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */

	lock_is_write_held = 0;
	cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
	if (cxt.lwsa == NULL) {
		VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < cxt.nrealwriters_stress; i++) {
		cxt.lwsa[i].n_lock_fail = 0;
		cxt.lwsa[i].n_lock_acquired = 0;
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		lock_is_read_held = 0;
		cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
		if (cxt.lrsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
			firsterr = -ENOMEM;
			/*
			 * NOTE(review): cxt.lwsa is freed but left non-NULL;
			 * harmless today since cleanup never frees it, but a
			 * stale pointer worth confirming against later code.
			 */
			kfree(cxt.lwsa);
			goto unwind;
		}

		for (i = 0; i < cxt.nrealreaders_stress; i++) {
			cxt.lrsa[i].n_lock_fail = 0;
			cxt.lrsa[i].n_lock_acquired = 0;
		}
	}
	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	if (writer_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating its kthread first. This can be modified
	 * for very specific needs, or even let the user choose the policy, if
	 * ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}
/* Module entry/exit hooks. */
module_init(lock_torture_init);
module_exit(lock_torture_cleanup);