debugobjects.c
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr	*descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up();
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}
/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
/*
 * We hash on the page-sized chunk the address falls into. That way we
 * can check for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object is on stack, but not annotated\n");
	else
		pr_warn("object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
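
/*
 * Illustrative usage sketch (not part of the original file): a subsystem
 * typically declares one debug_obj_descr per object type and wraps its
 * own setup paths with the init calls above. The "sample" names and the
 * ODEBUG_USAGE_SKETCH guard below are hypothetical.
 */
#ifdef ODEBUG_USAGE_SKETCH
struct sample_obj {
	unsigned long data;
};

static struct debug_obj_descr sample_debug_descr = {
	.name = "sample_obj",
};

static void sample_obj_setup(struct sample_obj *s)
{
	/* Heap or static object: tracker state becomes ODEBUG_STATE_INIT */
	debug_object_init(s, &sample_debug_descr);
	s->data = 0;
}

static void sample_obj_setup_onstack(struct sample_obj *s)
{
	/* Same, but also checks that the address really is on the stack */
	debug_object_init_on_stack(s, &sample_debug_descr);
	s->data = 0;
}
#endif /* ODEBUG_USAGE_SKETCH */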
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We let the
	 * type specific code confirm whether this really is a static
	 * object or not. If it is, we just make sure that the static
	 * object is tracked in the object tracker. If not, this must
	 * be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					 ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
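
/*
 * Illustrative sketch (not part of the original file) of the static
 * object handling described above: a descriptor can supply an
 * is_static_object() hook so that activation of an untracked static
 * object is accepted, and a fixup_activate() hook to repair genuine
 * mistakes. This mirrors the selftest descriptor further down; the
 * "sample" names and the ODEBUG_USAGE_SKETCH guard are hypothetical,
 * and the hooks would be wired into sample_debug_descr's
 * .is_static_object and .fixup_activate fields.
 */
#ifdef ODEBUG_USAGE_SKETCH
static bool sample_is_static_object(void *addr)
{
	struct sample_obj *s = addr;

	/* e.g. statically initialized objects carry a magic marker */
	return s->data == 0xdeadbeef;
}

static bool sample_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/* activated twice: deactivate, then activate again */
		debug_object_deactivate(addr, &sample_debug_descr);
		debug_object_activate(addr, &sample_debug_descr);
		return true;
	default:
		return false;
	}
}
#endif /* ODEBUG_USAGE_SKETCH */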
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
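
/*
 * Illustrative sketch (not part of the original file): the astate field
 * lets a caller enforce its own sub-state machine while the object stays
 * in ODEBUG_STATE_ACTIVE, e.g. "idle" vs. "queued". The "sample" names
 * and the ODEBUG_USAGE_SKETCH guard are hypothetical.
 */
#ifdef ODEBUG_USAGE_SKETCH
#define SAMPLE_ASTATE_IDLE	0
#define SAMPLE_ASTATE_QUEUED	1

static void sample_obj_queue(struct sample_obj *s)
{
	/* warn unless the object is active and currently idle */
	debug_object_active_state(s, &sample_debug_descr,
				  SAMPLE_ASTATE_IDLE, SAMPLE_ASTATE_QUEUED);
}

static void sample_obj_dispatch(struct sample_obj *s)
{
	/* warn unless the object was queued before */
	debug_object_active_state(s, &sample_debug_descr,
				  SAMPLE_ASTATE_QUEUED, SAMPLE_ASTATE_IDLE);
}
#endif /* ODEBUG_USAGE_SKETCH */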
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
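
/*
 * Illustrative sketch (not part of the original file): free paths of an
 * allocator call debug_check_no_obj_freed() on the memory range being
 * released, so that freeing memory which still holds an active tracked
 * object is caught and fixed up before the memory is reused. The
 * sample_free() wrapper and the ODEBUG_USAGE_SKETCH guard are
 * hypothetical.
 */
#ifdef ODEBUG_USAGE_SKETCH
static void sample_free(void *ptr, unsigned long size)
{
	/* Scan the range for objects the tracker still knows about */
	debug_check_no_obj_freed(ptr, size);
	kfree(ptr);
}
#endif /* ODEBUG_USAGE_SKETCH */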
#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, which avoids recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();
}