barrier.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>
/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()

#define dma_rmb()	mb()
#define dma_wmb()	mb()
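
/*
 * Usage sketch (illustrative only, not part of this header): dma_rmb()
 * and dma_wmb() order accesses to coherent DMA memory shared with a
 * device.  The "desc" structure, its fields, and the ownership flags
 * below are hypothetical.
 *
 *	// receive: check ownership before touching the payload
 *	if (desc->status & DESC_OWNED_BY_CPU) {
 *		dma_rmb();			// read status before data
 *		process(desc->data);
 *	}
 *
 *	// transmit: publish the payload before handing the descriptor over
 *	desc->data = buf;
 *	dma_wmb();				// data visible before the valid bit
 *	desc->status = DESC_OWNED_BY_DEVICE;
 */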
# define __smp_mb()	mb()

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
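
/*
 * Usage sketch (illustrative only, not part of this header): the public
 * smp_mb__before_atomic()/smp_mb__after_atomic() wrappers, defined from
 * the macros above by <asm-generic/barrier.h>, give full ordering around
 * atomic RMW operations that do not return a value.  "obj" and "v" are
 * hypothetical.
 *
 *	obj->ready = 1;
 *	smp_mb__before_atomic();	// order the store before the RMW
 *	atomic_inc(&v);
 *	smp_mb__after_atomic();		// order the RMW before later accesses
 */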
/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so no asm trickery is needed here!
 */
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
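
/*
 * Usage sketch (illustrative only, not part of this header): the generic
 * smp_store_release()/smp_load_acquire() wrappers built on the macros
 * above pair up for message passing.  "msg" and "ready" are hypothetical
 * shared variables.
 *
 *	// producer
 *	msg = compute();
 *	smp_store_release(&ready, 1);	// msg is visible before ready
 *
 *	// consumer
 *	if (smp_load_acquire(&ready))
 *		consume(msg);		// guaranteed to see the producer's msg
 */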
/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */