lib/debugobjects.c — generic infrastructure for object lifetime debugging (ODEBUG).
  1. /*
  2. * Generic infrastructure for lifetime debugging of objects.
  3. *
  4. * Started by Thomas Gleixner
  5. *
  6. * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
  7. *
  8. * For licencing details see kernel-base/COPYING
  9. */
  10. #define pr_fmt(fmt) "ODEBUG: " fmt
  11. #include <linux/debugobjects.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/sched.h>
  14. #include <linux/seq_file.h>
  15. #include <linux/debugfs.h>
  16. #include <linux/slab.h>
  17. #include <linux/hash.h>
  18. #define ODEBUG_HASH_BITS 14
  19. #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
  20. #define ODEBUG_POOL_SIZE 512
  21. #define ODEBUG_POOL_MIN_LEVEL 256
  22. #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
  23. #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
  24. #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
/*
 * Hash bucket: a lock-protected hlist of tracked objects whose
 * addresses hash to this slot.
 */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};
/* Hash table of buckets, indexed by hash of the object's chunk address. */
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static bootstrap pool, used until the slab cache is available. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the pool counters below. */
static DEFINE_RAW_SPINLOCK(pool_lock);

/* Free list of tracker objects. */
static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;

/* Dedicated slab cache for tracker objects; NULL during early boot. */
static struct kmem_cache	*obj_cache;

/* Statistics exported via debugfs. */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Set to the selftest descriptor while the selftest runs; else NULL. */
static struct debug_obj_descr	*descr_test  __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
/*
 * Kernel command line handlers:
 *   "debug_objects"    - force-enable object debugging
 *   "no_debug_objects" - force-disable object debugging
 */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
/* Human-readable names for the object states, used in warning output. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
  66. static void fill_pool(void)
  67. {
  68. gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
  69. struct debug_obj *new;
  70. unsigned long flags;
  71. if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
  72. return;
  73. if (unlikely(!obj_cache))
  74. return;
  75. while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
  76. new = kmem_cache_zalloc(obj_cache, gfp);
  77. if (!new)
  78. return;
  79. raw_spin_lock_irqsave(&pool_lock, flags);
  80. hlist_add_head(&new->node, &obj_pool);
  81. obj_pool_free++;
  82. raw_spin_unlock_irqrestore(&pool_lock, flags);
  83. }
  84. }
  85. /*
  86. * Lookup an object in the hash bucket.
  87. */
  88. static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
  89. {
  90. struct debug_obj *obj;
  91. int cnt = 0;
  92. hlist_for_each_entry(obj, &b->list, node) {
  93. cnt++;
  94. if (obj->object == addr)
  95. return obj;
  96. }
  97. if (cnt > debug_objects_maxchain)
  98. debug_objects_maxchain = cnt;
  99. return NULL;
  100. }
  101. /*
  102. * Allocate a new object. If the pool is empty, switch off the debugger.
  103. * Must be called with interrupts disabled.
  104. */
  105. static struct debug_obj *
  106. alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
  107. {
  108. struct debug_obj *obj = NULL;
  109. raw_spin_lock(&pool_lock);
  110. if (obj_pool.first) {
  111. obj = hlist_entry(obj_pool.first, typeof(*obj), node);
  112. obj->object = addr;
  113. obj->descr = descr;
  114. obj->state = ODEBUG_STATE_NONE;
  115. obj->astate = 0;
  116. hlist_del(&obj->node);
  117. hlist_add_head(&obj->node, &b->list);
  118. obj_pool_used++;
  119. if (obj_pool_used > obj_pool_max_used)
  120. obj_pool_max_used = obj_pool_used;
  121. obj_pool_free--;
  122. if (obj_pool_free < obj_pool_min_free)
  123. obj_pool_min_free = obj_pool_free;
  124. }
  125. raw_spin_unlock(&pool_lock);
  126. return obj;
  127. }
/*
 * Workqueue function to free objects.
 *
 * Shrinks the free pool back down to ODEBUG_POOL_SIZE. pool_lock is
 * dropped around each kmem_cache_free() and the loop condition is
 * re-evaluated after re-acquiring it, so concurrent pool changes are
 * picked up.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 *
 * The shrink work is only scheduled when the pool is above its target
 * size, the slab cache exists, and keventd is up (this path can run
 * very early during boot). The decision is made under pool_lock;
 * schedule_work() itself is deferred until after the unlock.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up();
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}
  172. /*
  173. * We run out of memory. That means we probably have tons of objects
  174. * allocated.
  175. */
  176. static void debug_objects_oom(void)
  177. {
  178. struct debug_bucket *db = obj_hash;
  179. struct hlist_node *tmp;
  180. HLIST_HEAD(freelist);
  181. struct debug_obj *obj;
  182. unsigned long flags;
  183. int i;
  184. pr_warn("Out of memory. ODEBUG disabled\n");
  185. for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
  186. raw_spin_lock_irqsave(&db->lock, flags);
  187. hlist_move_list(&db->list, &freelist);
  188. raw_spin_unlock_irqrestore(&db->lock, flags);
  189. /* Now free them */
  190. hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
  191. hlist_del(&obj->node);
  192. free_object(obj);
  193. }
  194. }
  195. }
  196. /*
  197. * We use the pfn of the address for the hash. That way we can check
  198. * for freed objects simply by checking the affected bucket.
  199. */
  200. static struct debug_bucket *get_bucket(unsigned long addr)
  201. {
  202. unsigned long hash;
  203. hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
  204. return &obj_hash[hash];
  205. }
/*
 * Emit a rate-limited warning describing a state machine violation.
 * At most 5 WARNs are printed per boot; the selftest descriptor is
 * excluded so the selftest's expected warning counts stay exact. The
 * warning counter is bumped unconditionally.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* Optional per-type hint (e.g. a callback pointer). */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
  221. /*
  222. * Try to repair the damage, so we have a better chance to get useful
  223. * debug output.
  224. */
  225. static int
  226. debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
  227. void * addr, enum debug_obj_state state)
  228. {
  229. int fixed = 0;
  230. if (fixup)
  231. fixed = fixup(addr, state);
  232. debug_objects_fixups += fixed;
  233. return fixed;
  234. }
  235. static void debug_object_is_on_stack(void *addr, int onstack)
  236. {
  237. int is_on_stack;
  238. static int limit;
  239. if (limit > 4)
  240. return;
  241. is_on_stack = object_is_on_stack(addr);
  242. if (is_on_stack == onstack)
  243. return;
  244. limit++;
  245. if (is_on_stack)
  246. pr_warn("object is on stack, but not annotated\n");
  247. else
  248. pr_warn("object is not on stack, but annotated\n");
  249. WARN_ON(1);
  250. }
/*
 * Core of debug_object_init(): track @addr in the hash and move its
 * tracker to ODEBUG_STATE_INIT, allocating a tracker on first sight.
 * @onstack is the caller's claim about whether the object lives on the
 * stack; it is cross-checked against the actual address.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Top up the free pool before taking any locks. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: switch the whole facility off. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Re-initializing an active object: warn, then let the
		 * type-specific code try to repair it (without the lock). */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	/* onstack = 0: the object is expected to be heap/static. */
	__debug_object_init(addr, descr, 0);
}
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	/* onstack = 1: the object is expected to live on the stack. */
	__debug_object_init(addr, descr, 1);
}
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	/* Stand-in for warnings about objects that were never tracked. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: warn, drop the lock, try fixup. */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? -EINVAL : 0;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE)) {
		debug_print_object(&o, "activate");
		return -EINVAL;
	}
	return 0;
}
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* A non-zero astate means a pending extra state
			 * transition (see debug_object_active_state). */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Never tracked: warn with a stand-in object. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroying an active object: warn and try to fix up
		 * (without the bucket lock held). */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object: warn and try to fix up
		 * (without the bucket lock held). */
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Any other state: drop the tracker back into the pool. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* Stand-in for the warning, valid after the unlock too. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static.  Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 *
 * Advances the object-private @astate from @expect to @next under the
 * bucket lock for an ACTIVE object; anything else is reported.
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Never tracked: warn with a stand-in object. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
  555. #ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan [address, address + size) for tracked objects that are about to
 * be freed. Active objects get a warning plus a fixup attempt; all
 * others are silently untracked and their trackers returned to the
 * pool. The range is walked chunk by chunk because the hash is keyed
 * on the object's chunk address (see get_bucket()).
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	/* Number of chunks the range spans, rounded up. */
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

	/*
	 * The fixup path drops the bucket lock, so the chain must be
	 * re-walked from scratch afterwards.
	 */
repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* Objects in the same bucket but outside the range. */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
  606. void debug_check_no_obj_freed(const void *address, unsigned long size)
  607. {
  608. if (debug_objects_enabled)
  609. __debug_check_no_obj_freed(address, size);
  610. }
  611. #endif
  612. #ifdef CONFIG_DEBUG_FS
/* seq_file show handler for the debugfs "debug_objects/stats" file. */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}
/* Open handler: single-shot seq_file dump via debug_stats_show(). */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
/* File operations for the debugfs "stats" file. */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  634. static int __init debug_objects_init_debugfs(void)
  635. {
  636. struct dentry *dbgdir, *dbgstats;
  637. if (!debug_objects_enabled)
  638. return 0;
  639. dbgdir = debugfs_create_dir("debug_objects", NULL);
  640. if (!dbgdir)
  641. return -ENOMEM;
  642. dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
  643. &debug_stats_fops);
  644. if (!dbgstats)
  645. goto err;
  646. return 0;
  647. err:
  648. debugfs_remove(dbgdir);
  649. return -ENOMEM;
  650. }
  651. __initcall(debug_objects_init_debugfs);
  652. #else
/* debugfs not configured: no-op stub keeps callers #ifdef-free. */
static inline void debug_objects_init_debugfs(void) { }
  654. #endif
  655. #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	/* Checked by fixup_activate() to emulate a statically
	 * initialized object (see the selftest). */
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;
  663. /*
  664. * fixup_init is called when:
  665. * - an active object is initialized
  666. */
  667. static int __init fixup_init(void *addr, enum debug_obj_state state)
  668. {
  669. struct self_test *obj = addr;
  670. switch (state) {
  671. case ODEBUG_STATE_ACTIVE:
  672. debug_object_deactivate(obj, &descr_type_test);
  673. debug_object_init(obj, &descr_type_test);
  674. return 1;
  675. default:
  676. return 0;
  677. }
  678. }
  679. /*
  680. * fixup_activate is called when:
  681. * - an active object is activated
  682. * - an unknown object is activated (might be a statically initialized object)
  683. */
  684. static int __init fixup_activate(void *addr, enum debug_obj_state state)
  685. {
  686. struct self_test *obj = addr;
  687. switch (state) {
  688. case ODEBUG_STATE_NOTAVAILABLE:
  689. if (obj->static_init == 1) {
  690. debug_object_init(obj, &descr_type_test);
  691. debug_object_activate(obj, &descr_type_test);
  692. return 0;
  693. }
  694. return 1;
  695. case ODEBUG_STATE_ACTIVE:
  696. debug_object_deactivate(obj, &descr_type_test);
  697. debug_object_activate(obj, &descr_type_test);
  698. return 1;
  699. default:
  700. return 0;
  701. }
  702. }
  703. /*
  704. * fixup_destroy is called when:
  705. * - an active object is destroyed
  706. */
  707. static int __init fixup_destroy(void *addr, enum debug_obj_state state)
  708. {
  709. struct self_test *obj = addr;
  710. switch (state) {
  711. case ODEBUG_STATE_ACTIVE:
  712. debug_object_deactivate(obj, &descr_type_test);
  713. debug_object_destroy(obj, &descr_type_test);
  714. return 1;
  715. default:
  716. return 0;
  717. }
  718. }
  719. /*
  720. * fixup_free is called when:
  721. * - an active object is freed
  722. */
  723. static int __init fixup_free(void *addr, enum debug_obj_state state)
  724. {
  725. struct self_test *obj = addr;
  726. switch (state) {
  727. case ODEBUG_STATE_ACTIVE:
  728. debug_object_deactivate(obj, &descr_type_test);
  729. debug_object_free(obj, &descr_type_test);
  730. return 1;
  731. default:
  732. return 0;
  733. }
  734. }
/*
 * Verify that the tracker for @addr is in @state and that the global
 * fixup/warning counters match the expected values. On any mismatch a
 * WARN is emitted and object debugging is disabled. Returns 0 on
 * success, -EINVAL on failure.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* ODEBUG_STATE_NONE means "no tracker expected". */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
/* Selftest descriptor wired to the __init fixup callbacks above. */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The single object the selftest exercises. */
static __initdata struct self_test obj = { .static_init = 0 };
/*
 * Run the state machine through a sequence of legal and illegal
 * transitions and verify tracker state and counters after each step.
 * Runs with interrupts disabled; the global fixup/warning counters are
 * restored on exit so the debugfs stats are not polluted.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* The normal life cycle: init -> activate. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;

	/* Activating an active object must warn and get fixed up. */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;

	/* Operations on a destroyed object only warn. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activation of a "statically initialized" object. */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory that still holds an active object. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
  842. #else
/* Selftest not configured: no-op stub keeps the init path #ifdef-free. */
static inline void debug_objects_selftest(void) { }
  844. #endif
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the poll list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Seed the free pool from the __initdata bootstrap array. */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
/*
 * Convert the statically allocated objects to dynamic ones.
 *
 * Allocates ODEBUG_POOL_SIZE slab trackers up front, swaps them in for
 * the __initdata bootstrap pool, and replaces any trackers already live
 * in the hash buckets with copies. Returns 0 on success, -ENOMEM when
 * the up-front allocation fails (nothing is changed in that case).
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			/* Take a fresh tracker from the new pool ... */
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			/* ... and link it back into the bucket. */
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	/* Allocation failed: release everything allocated so far. */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
  908. /*
  909. * Called after the kmem_caches are functional to setup a dedicated
  910. * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  911. * prevents that the debug code is called on kmem_cache_free() for the
  912. * debug tracker objects to avoid recursive calls.
  913. */
  914. void __init debug_objects_mem_init(void)
  915. {
  916. if (!debug_objects_enabled)
  917. return;
  918. obj_cache = kmem_cache_create("debug_objects_cache",
  919. sizeof (struct debug_obj), 0,
  920. SLAB_DEBUG_OBJECTS, NULL);
  921. if (!obj_cache || debug_objects_replace_static_objects()) {
  922. debug_objects_enabled = 0;
  923. if (obj_cache)
  924. kmem_cache_destroy(obj_cache);
  925. pr_warn("out of memory.\n");
  926. } else
  927. debug_objects_selftest();
  928. }