/* Copyright (C) 2008-2015 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This is a Linux specific implementation of a mutex synchronization
   mechanism for libgomp.  This type is private to the library.  This
   implementation uses atomic instructions and the futex syscall.  */
#include <endian.h>
#include <limits.h>
#include "wait.h"
/* Slow path of the ptrlock "get" operation: the fast path found the lock
   word still equal to 1 (unset).  Advertise contention, sleep on the futex
   until another thread publishes the real pointer, then return it.

   PTRLOCK points at the lock word, which holds 1 (unset), 2 (unset with
   waiters) or an actual pointer value once set.  */

void *
gomp_ptrlock_get_slow (gomp_ptrlock_t *ptrlock)
{
  int *intptr;
  uintptr_t oldval = 1;

  /* Flag that there is a waiter: 1 -> 2.  If the word is no longer 1
     (already 2, or already a real pointer), the CAS just fails and the
     wait loop below re-reads the current value.  Relaxed ordering is
     enough here; the final acquire load below provides the needed
     synchronization for the pointed-to data.  */
  __atomic_compare_exchange_n (ptrlock, &oldval, 2, false,
			       MEMMODEL_RELAXED, MEMMODEL_RELAXED);

  /* futex works on ints, not pointers.
     But a valid work share pointer will be at least
     8 byte aligned, so it is safe to assume the low
     32-bits of the pointer won't contain values 1 or 2.  */
  /* The empty asm copies PTRLOCK into INTPTR, reinterpreting the lock
     word's address as int * for the futex without a plain pointer cast
     (presumably to keep the compiler from drawing aliasing conclusions
     — same trick as in gomp_ptrlock_set_slow).  */
  __asm volatile ("" : "=r" (intptr) : "0" (ptrlock));
#if __BYTE_ORDER == __BIG_ENDIAN
  /* On big-endian targets the low-order 32 bits of a lock word wider
     than int live in its last int-sized slot; point the futex there.  */
  if (sizeof (*ptrlock) > sizeof (int))
    intptr += (sizeof (*ptrlock) / sizeof (int)) - 1;
#endif

  /* Sleep while the word still reads 2.  do_wait can return without the
     value having changed (spurious wakeup / value mismatch), so re-check
     in a loop.  */
  do
    do_wait (intptr, 2);
  while (__atomic_load_n (intptr, MEMMODEL_RELAXED) == 2);

  /* Compiler barrier: keep the re-read of the full-width lock word below
     from being moved before the loop above.  */
  __asm volatile ("" : : : "memory");

  /* Acquire load of the published pointer — pairs with the setter's
     store (see ptrlock.h for the fast-path store; not visible here).  */
  return (void *) __atomic_load_n (ptrlock, MEMMODEL_ACQUIRE);
}
/* Slow path of the ptrlock "set" operation: presumably invoked when the
   setter observed the contended value 2 in the lock word, i.e. at least
   one thread may be sleeping in gomp_ptrlock_get_slow (the fast path is
   in ptrlock.h, not visible here — confirm there).  Wake all waiters.  */

void
gomp_ptrlock_set_slow (gomp_ptrlock_t *ptrlock)
{
  int *intptr;

  /* futex works on ints: launder the lock word's address into int *
     through an empty asm, mirroring gomp_ptrlock_get_slow.  */
  __asm volatile ("" : "=r" (intptr) : "0" (ptrlock));
#if __BYTE_ORDER == __BIG_ENDIAN
  /* Address the int holding the low-order 32 bits — the slot the
     waiters' futex sleeps on.  */
  if (sizeof (*ptrlock) > sizeof (int))
    intptr += (sizeof (*ptrlock) / sizeof (int)) - 1;
#endif
  /* Wake every thread sleeping on this futex word.  */
  futex_wake (intptr, INT_MAX);
}