/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

#if defined(CONFIG_TINY_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);
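
/*
 * Usage sketch (illustrative, not part of this header): arch-level
 * interrupt entry code typically brackets handler dispatch with
 * irq_enter()/irq_exit(), roughly:
 *
 *	irq_enter();			// enter hardirq context, catch up jiffies on NO_HZ
 *	generic_handle_irq(irq);	// run the registered handler(s)
 *	irq_exit();			// leave hardirq context, run pending softirqs
 *
 * The generic_handle_irq() call is named here only for illustration;
 * the exact dispatch routine differs per architecture.
 */
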
#ifndef arch_nmi_enter
#define arch_nmi_enter()	do { } while (0)
#define arch_nmi_exit()		do { } while (0)
#endif

#define nmi_enter()						\
	do {							\
		arch_nmi_enter();				\
		printk_nmi_enter();				\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
		printk_nmi_exit();				\
		arch_nmi_exit();				\
	} while (0)
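
/*
 * Usage sketch (illustrative, not part of this header): an architecture's
 * NMI handler wraps its body in nmi_enter()/nmi_exit() so that the preempt
 * count, RCU, lockdep and tracing all see a consistent NMI context:
 *
 *	nmi_enter();
 *	... handle the non-maskable interrupt ...
 *	nmi_exit();
 *
 * Nesting is not allowed: the BUG_ON(in_nmi()) in nmi_enter() above fires
 * if nmi_enter() is called while already in NMI context.
 */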

#endif /* LINUX_HARDIRQ_H */