/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
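
/*
 * Illustration (not compiled): assuming lockdep_states.h lists the
 * HARDIRQ and SOFTIRQ states, the LOCKDEP_STATE() expansion above
 * produces four bits per state, in order:
 *
 *	enum lock_usage_bit {
 *		LOCK_USED_IN_HARDIRQ,
 *		LOCK_USED_IN_HARDIRQ_READ,
 *		LOCK_ENABLED_HARDIRQ,
 *		LOCK_ENABLED_HARDIRQ_READ,
 *		LOCK_USED_IN_SOFTIRQ,
 *		LOCK_USED_IN_SOFTIRQ_READ,
 *		LOCK_ENABLED_SOFTIRQ,
 *		LOCK_ENABLED_SOFTIRQ_READ,
 *		LOCK_USED,
 *		LOCK_USAGE_STATES
 *	};
 */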

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
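
/*
 * Illustration (not compiled): for the HARDIRQ state, __LOCKF() yields
 * bitmasks mirroring the usage-state enum above, e.g.:
 *
 *	LOCKF_USED_IN_HARDIRQ      = (1 << LOCK_USED_IN_HARDIRQ),
 *	LOCKF_USED_IN_HARDIRQ_READ = (1 << LOCK_USED_IN_HARDIRQ_READ),
 *	LOCKF_ENABLED_HARDIRQ      = (1 << LOCK_ENABLED_HARDIRQ),
 *	LOCKF_ENABLED_HARDIRQ_READ = (1 << LOCK_ENABLED_HARDIRQ_READ),
 */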

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires the kernel's
 * .text, .data and .bss to fit within a 32MB limit. With CONFIG_LOCKDEP
 * we could exceed this limit and cause boot problems, so reduce the
 * static allocations for lockdep-related structures so that everything
 * fits within the current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency list (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
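
/*
 * Rough example: when a task already holding A and B acquires C,
 * lockdep records the dependencies A -> C and B -> C (if not already
 * present), and checks that no path C -> ... -> A or C -> ... -> B
 * already exists in the graph; such a path would be a lock-order
 * cycle, i.e. a potential deadlock.
 */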
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
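
/*
 * Note: the '*5' above sizes the held-lock storage for an average of
 * five held locks per cached lock chain.
 */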

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];
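
/*
 * Buffer size for the human-readable usage string filled in by
 * get_usage_chars(): with n states from lockdep_states.h we have
 * LOCK_USAGE_STATES == 4*n + 1, so this yields 2*n characters (one
 * per state for the write variant, one for the read variant) plus a
 * terminating '\0'.
 */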
#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We keep them per-cpu, as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_redundant_checks;
	int	nr_redundant;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
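
/*
 * The matching DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats)
 * for the declaration above lives in lockdep.c.
 */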

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
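
/*
 * Usage sketch (illustrative): __debug_atomic_inc() uses the
 * preemption- and IRQ-safe this_cpu_inc(), while debug_atomic_inc()
 * and debug_atomic_dec() assert that IRQs are off and may therefore
 * use the cheaper __this_cpu_*() forms; debug_atomic_read() sums the
 * counter over all possible CPUs, e.g.:
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *	...
 *	seq_printf(m, "%llu\n", debug_atomic_read(chain_lookup_hits));
 */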

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif