specific.c

/*
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_priv.h" /* For GC_compare_and_exchange, GC_memory_barrier */

#if defined(GC_LINUX_THREADS) || defined(GC_GNU_THREADS)

#include "private/specific.h"

static tse invalid_tse = {INVALID_QTID, 0, 0, INVALID_THREADID};
                /* A thread-specific data entry which will never    */
                /* appear valid to a reader.  Used to fill in empty */
                /* cache entries to avoid a check for 0.            */
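
/* Design note: writers (setspecific, remove_specific) serialize on the  */
/* per-key lock, while readers never lock; correctness relies on         */
/* pointer-sized stores being atomic.  The per-key cache is keyed by a   */
/* "quick thread id" (qtid) and pre-filled with &invalid_tse above so    */
/* that lookups never have to test for a 0 entry.  The lock-free         */
/* fast-path lookup itself presumably lives in private/specific.h; this  */
/* file supplies the slow path and the writers.                          */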
int PREFIXED(key_create) (tsd ** key_ptr, void (* destructor)(void *)) {
    int i;
    tsd * result = (tsd *)MALLOC_CLEAR(sizeof (tsd));

    /* A quick alignment check, since we need atomic stores */
    GC_ASSERT((unsigned long)(&invalid_tse.next) % sizeof(tse *) == 0);
    if (0 == result) return ENOMEM;
    pthread_mutex_init(&(result -> lock), NULL);
    for (i = 0; i < TS_CACHE_SIZE; ++i) {
        result -> cache[i] = &invalid_tse;
    }
#   ifdef GC_ASSERTIONS
      for (i = 0; i < TS_HASH_SIZE; ++i) {
        GC_ASSERT(result -> hash[i] == 0);
      }
#   endif
    *key_ptr = result;
    return 0;
}
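
/* Set the calling thread's value for key.  A fresh entry is allocated  */
/* and pushed onto the head of the hash chain; any older entry for the  */
/* same thread is simply left in place (see the comment below).         */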
int PREFIXED(setspecific) (tsd * key, void * value) {
    pthread_t self = pthread_self();
    int hash_val = HASH(self);
    volatile tse * entry = (volatile tse *)MALLOC_CLEAR(sizeof (tse));

    GC_ASSERT(self != INVALID_THREADID);
    if (0 == entry) return ENOMEM;

    pthread_mutex_lock(&(key -> lock));
    /* Could easily check for an existing entry here. */
    entry -> next = key -> hash[hash_val];
    entry -> thread = self;
    entry -> value = value;
    GC_ASSERT(entry -> qtid == INVALID_QTID);
    /* There can only be one writer at a time, but this needs to be */
    /* atomic with respect to concurrent readers.                   */
    *(volatile tse **)(key -> hash + hash_val) = entry;
    pthread_mutex_unlock(&(key -> lock));
    return 0;
}
/* Remove thread-specific data for this thread.  Should be called on */
/* thread exit.                                                       */
void PREFIXED(remove_specific) (tsd * key) {
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry;
    tse **link = key -> hash + hash_val;

    pthread_mutex_lock(&(key -> lock));
    entry = *link;
    while (entry != NULL && entry -> thread != self) {
        link = &(entry -> next);
        entry = *link;
    }
    if (entry != NULL) {
        /* Invalidate qtid field, since qtids may be reused, and a later */
        /* cache lookup could otherwise find this entry.                 */
        entry -> qtid = INVALID_QTID;
        *link = entry -> next;
        /* Atomic! concurrent accesses still work.   */
        /* They must, since readers don't lock.      */
        /* We shouldn't need a volatile access here, */
        /* since both this and the preceding write   */
        /* should become visible no later than       */
        /* the pthread_mutex_unlock() call.          */
    }
    /* If we wanted to deallocate the entry, we'd first have to clear   */
    /* any cache entries pointing to it.  That probably requires        */
    /* additional synchronization, since we can't prevent a concurrent  */
    /* cache lookup, which could still be examining deallocated memory. */
    /* This can only happen if the concurrent access is from another    */
    /* thread, and hence has missed the cache, but still...             */
    /* With GC, we're done, since the pointers from the cache will      */
    /* be overwritten, all local pointers to the entries will be        */
    /* dropped, and the entry will then be reclaimed.                   */
    pthread_mutex_unlock(&(key -> lock));
}
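
/* Look up the calling thread's entry the slow way, by walking the hash  */
/* chain.  qtid is presumably the caller's "quick thread id" computed by */
/* the fast path in private/specific.h, and cache_ptr is the cache slot  */
/* that missed; on success we refill that slot so the next lookup can    */
/* hit the fast path.                                                    */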
/* Note that even the slow path doesn't lock. */
void * PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
                                   tse * volatile * cache_ptr) {
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry = key -> hash[hash_val];

    GC_ASSERT(qtid != INVALID_QTID);
    while (entry != NULL && entry -> thread != self) {
        entry = entry -> next;
    }
    if (entry == NULL) return NULL;

    /* Set cache_entry. */
    entry -> qtid = qtid;
        /* It's safe to do this asynchronously.  Either value */
        /* is safe, though may produce spurious misses.       */
        /* We're replacing one qtid with another one for the  */
        /* same thread.                                        */
    *cache_ptr = entry;
        /* Again this is safe since pointer assignments are */
        /* presumed atomic, and either pointer is valid.    */
    return entry -> value;
}

#endif /* GC_LINUX_THREADS */
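
/* A minimal, hypothetical usage sketch (my_key and my_value are          */
/* placeholder names; it assumes PREFIXED expands to the usual GC_ prefix */
/* and that private/specific.h provides the matching lock-free fast-path  */
/* lookup, PREFIXED(getspecific), which falls back to the slow path above */
/* on a cache miss):                                                      */
/*                                                                        */
/*     tsd * my_key;                                                      */
/*     if (PREFIXED(key_create)(&my_key, 0) != 0) return ENOMEM;          */
/*     ...                                                                */
/*     PREFIXED(setspecific)(my_key, my_value);     once, in each thread  */
/*     void * v = PREFIXED(getspecific)(my_key);    lock-free lookup      */
/*     ...                                                                */
/*     PREFIXED(remove_specific)(my_key);           on thread exit        */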