/* include/linux/cache.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __LINUX_CACHE_H
  3. #define __LINUX_CACHE_H
  4. #include <uapi/linux/kernel.h>
  5. #include <asm/cache.h>
  6. #ifndef L1_CACHE_ALIGN
  7. #define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
  8. #endif
  9. #ifndef SMP_CACHE_BYTES
  10. #define SMP_CACHE_BYTES L1_CACHE_BYTES
  11. #endif
  12. /*
  13. * __read_mostly is used to keep rarely changing variables out of frequently
  14. * updated cachelines. If an architecture doesn't support it, ignore the
  15. * hint.
  16. */
  17. #ifndef __read_mostly
  18. #define __read_mostly
  19. #endif
  20. /*
  21. * __ro_after_init is used to mark things that are read-only after init (i.e.
  22. * after mark_rodata_ro() has been called). These are effectively read-only,
  23. * but may get written to during init, so can't live in .rodata (via "const").
  24. */
  25. #ifndef __ro_after_init
  26. #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
  27. #endif
  28. #ifndef ____cacheline_aligned
  29. #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
  30. #endif
  31. #ifndef ____cacheline_aligned_in_smp
  32. #ifdef CONFIG_SMP
  33. #define ____cacheline_aligned_in_smp ____cacheline_aligned
  34. #else
  35. #define ____cacheline_aligned_in_smp
  36. #endif /* CONFIG_SMP */
  37. #endif
  38. #ifndef __cacheline_aligned
  39. #define __cacheline_aligned \
  40. __attribute__((__aligned__(SMP_CACHE_BYTES), \
  41. __section__(".data..cacheline_aligned")))
  42. #endif /* __cacheline_aligned */
  43. #ifndef __cacheline_aligned_in_smp
  44. #ifdef CONFIG_SMP
  45. #define __cacheline_aligned_in_smp __cacheline_aligned
  46. #else
  47. #define __cacheline_aligned_in_smp
  48. #endif /* CONFIG_SMP */
  49. #endif
  50. /*
  51. * The maximum alignment needed for some critical structures
  52. * These could be inter-node cacheline sizes/L3 cacheline
  53. * size etc. Define this in asm/cache.h for your arch
  54. */
  55. #ifndef INTERNODE_CACHE_SHIFT
  56. #define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
  57. #endif
  58. #if !defined(____cacheline_internodealigned_in_smp)
  59. #if defined(CONFIG_SMP)
  60. #define ____cacheline_internodealigned_in_smp \
  61. __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
  62. #else
  63. #define ____cacheline_internodealigned_in_smp
  64. #endif
  65. #endif
  66. #ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
  67. #define cache_line_size() L1_CACHE_BYTES
  68. #endif
  69. #endif /* __LINUX_CACHE_H */