// locks.h - Thread synchronization primitives. PA-RISC implementation.

/* Copyright (C) 2002, 2005  Free Software Foundation

   This file is part of libgcj.

This software is copyrighted work licensed under the terms of the
Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
details.  */

#ifndef __SYSDEP_LOCKS_H__
#define __SYSDEP_LOCKS_H__

#include <stddef.h>   // for size_t, used by obj_addr_t below

// Integer type big enough for object address.
typedef size_t obj_addr_t;

template<int _Inst>
  struct _pa_jv_cas_lock
  {
    static volatile int _S_pa_jv_cas_lock;
  };

template<int _Inst>
volatile int
_pa_jv_cas_lock<_Inst>::_S_pa_jv_cas_lock __attribute__ ((aligned (16))) = 1;

// Because of the lack of weak support when using the hpux som
// linker, we explicitly instantiate the atomicity lock.
template volatile int _pa_jv_cas_lock<0>::_S_pa_jv_cas_lock;
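
// Descriptive note (added commentary, not in the original): PA-RISC "ldcw"
// (load and clear word) atomically loads a word and stores zero into it,
// so a nonzero value means "lock free"; that is why the lock word above is
// initialized to 1 and why it carries aligned(16): the architecture
// requires ldcw operands to be 16-byte aligned.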

// Atomically replace *addr by new_val if it was initially equal to old_val.
// Return true if the comparison is successful.
// Assumed to have acquire semantics, i.e. later memory operations
// cannot execute before the compare_and_swap finishes.
// The following implementation is atomic but it can deadlock
// (e.g., if a thread dies holding the lock).
inline static bool
__attribute__ ((__unused__))
compare_and_swap(volatile obj_addr_t *addr,
                 obj_addr_t old_val,
                 obj_addr_t new_val)
{
  bool result;
  int tmp;
  volatile int& lock = _pa_jv_cas_lock<0>::_S_pa_jv_cas_lock;
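
  // Descriptive note (added commentary, not in the original): the asm
  // below is a tiny spinlock guarding the pseudo-CAS.
  //   ldcw        atomically loads the lock word and clears it; a nonzero
  //               result means the lock was free and is now ours
  //   cmpib,<>,n  if we got it, branch forward past the wait loop
  //   ldw/cmpib,= otherwise poll with plain loads while the word is 0
  //   b,n         once it reads nonzero again, branch back and retry ldcw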
  __asm__ __volatile__ ("ldcw 0(%1),%0\n\t"
                        "cmpib,<>,n 0,%0,.+20\n\t"
                        "ldw 0(%1),%0\n\t"
                        "cmpib,= 0,%0,.-4\n\t"
                        "nop\n\t"
                        "b,n .-20"
                        : "=&r" (tmp)
                        : "r" (&lock)
                        : "memory");

  if (*addr != old_val)
    result = false;
  else
    {
      *addr = new_val;
      result = true;
    }

  /* Reset lock with PA 2.0 "ordered" store.  */
  __asm__ __volatile__ ("stw,ma %1,0(%0)"
                        : : "r" (&lock), "r" (tmp) : "memory");

  return result;
}
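
// Illustrative sketch, not part of the original libgcj interface: the
// usual retry loop callers wrap around compare_and_swap, re-reading the
// old value until the swap succeeds.  The function name and the increment
// operation are hypothetical.
inline static void
__attribute__ ((__unused__))
atomic_increment_sketch(volatile obj_addr_t *addr)
{
  obj_addr_t old_val;
  do
    old_val = *addr;                    // snapshot the current value
  while (!compare_and_swap(addr, old_val, old_val + 1));  // retry on races
}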

// Set *addr to new_val with release semantics, i.e. making sure
// that prior loads and stores complete before this
// assignment.
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__(" " : : : "memory");
  *(addr) = new_val;
}

// Compare_and_swap with release semantics instead of acquire semantics.
// On many architectures, the operation makes both guarantees, so the
// implementation can be the same.
inline static bool
compare_and_swap_release(volatile obj_addr_t *addr,
                         obj_addr_t old,
                         obj_addr_t new_val)
{
  return compare_and_swap(addr, old, new_val);
}
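
// Illustrative sketch (hypothetical, not in the original file): a minimal
// spinlock built from these primitives.  Acquisition uses the acquiring
// compare_and_swap, so critical-section accesses cannot float above the
// lock; release uses release_set, so they cannot sink below the unlock.
inline static void
__attribute__ ((__unused__))
spin_lock_sketch(volatile obj_addr_t *lock_word)
{
  while (!compare_and_swap(lock_word, 0, 1))
    ;                                   // busy-wait: 0 = free, 1 = held
}

inline static void
__attribute__ ((__unused__))
spin_unlock_sketch(volatile obj_addr_t *lock_word)
{
  release_set(lock_word, 0);            // publish stores, then mark free
}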

// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
inline static void
read_barrier()
{
  __asm__ __volatile__(" " : : : "memory");
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  __asm__ __volatile__(" " : : : "memory");
}
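
// Illustrative sketch (hypothetical names, not in the original file): the
// producer/consumer pairing these barriers exist for.  The writer fences
// between payload and flag; the reader fences between flag and payload.
inline static void
__attribute__ ((__unused__))
publish_sketch(volatile obj_addr_t *payload, volatile obj_addr_t *flag)
{
  *payload = 42;                        // produce the data
  write_barrier();                      // data must be visible before flag
  *flag = 1;                            // signal the reader
}

inline static obj_addr_t
__attribute__ ((__unused__))
consume_sketch(volatile obj_addr_t *payload, volatile obj_addr_t *flag)
{
  while (*flag == 0)
    ;                                   // wait for the writer's signal
  read_barrier();                       // flag read must precede payload read
  return *payload;
}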

#endif