/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
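
/*
 * Illustrative expansion (a sketch, not compiled code): assuming
 * lockdep_states.h lists LOCKDEP_STATE(HARDIRQ) and LOCKDEP_STATE(SOFTIRQ)
 * (kernels of this vintage also list RECLAIM_FS), the x-macro above
 * expands to consecutive enum values, four per state:
 *
 *	enum lock_usage_bit {
 *		LOCK_USED_IN_HARDIRQ,		(bit 0)
 *		LOCK_USED_IN_HARDIRQ_READ,	(bit 1)
 *		LOCK_ENABLED_HARDIRQ,		(bit 2)
 *		LOCK_ENABLED_HARDIRQ_READ,	(bit 3)
 *		LOCK_USED_IN_SOFTIRQ,		(bit 4)
 *		...
 *		LOCK_USED,
 *		LOCK_USAGE_STATES
 *	};
 */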

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
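
/*
 * Illustrative expansion (a sketch under the same assumed states): each
 * __LOCKF() line turns a usage bit into a single-bit mask, e.g.
 *
 *	LOCKF_USED_IN_HARDIRQ = (1 << LOCK_USED_IN_HARDIRQ),
 *
 * so the composite masks above let callers test several usage bits of a
 * class's usage_mask at once: (class->usage_mask & LOCKF_USED_IN_IRQ) is
 * non-zero iff the lock was ever acquired in hardirq or softirq context.
 */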

/*
 * CONFIG_PROVE_LOCKING_SMALL is defined for sparc, where .text, .data and
 * .bss must fit within the kernel's required 32MB size limit. With
 * PROVE_LOCKING enabled we could exceed this limit and break system
 * boot-up, so reduce the static allocations for lockdep-related
 * structures until everything fits within that limit.
 */
#ifdef CONFIG_PROVE_LOCKING_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * every to-be-taken lock to each currently held lock's dependency
 * table (if it's not there yet), and we check them for lock-order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
#define MAX_LOCKDEP_ENTRIES	32768UL
#define MAX_LOCKDEP_CHAINS_BITS	16
/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
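
/*
 * Orientation sketch (assumed, from the lockdep.c of this era): these
 * limits size the global static tables that back the dependency graph,
 * roughly:
 *
 *	static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 *	struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
 *	static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
 *	static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
 */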

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
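
/*
 * Sizing check (worked example, assuming three states in lockdep_states.h):
 * get_usage_chars() emits two characters per state (one for USED_IN, one
 * for the _READ variant) plus a terminating NUL. Three states make the
 * enum above count 12 per-state bits, then LOCK_USED == 12 and
 * LOCK_USAGE_STATES == 13, so LOCK_USAGE_CHARS == 1 + 13/2 == 7, which is
 * exactly 3 states * 2 chars + NUL.
 */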

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;
extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;
extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-cpu as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
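
/*
 * Usage sketch (hedged, based on the callers in lockdep.c/lockdep_proc.c):
 * the fast path bumps a per-cpu counter with IRQs already disabled, and
 * the /proc reporting side folds all CPUs together, e.g.
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *	seq_printf(m, "%llu\n", debug_atomic_read(chain_lookup_hits));
 */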

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif