/* mem.h : low-level, endianness-aware memory read/write helpers (header-only) */
  1. /*
  2. * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
  3. * All rights reserved.
  4. *
  5. * This source code is licensed under both the BSD-style license (found in the
  6. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  7. * in the COPYING file in the root directory of this source tree).
  8. * You may select, at your option, one of the above-listed licenses.
  9. */
  10. #ifndef MEM_H_MODULE
  11. #define MEM_H_MODULE
  12. #if defined (__cplusplus)
  13. extern "C" {
  14. #endif
/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>     /* size_t, ptrdiff_t */
#include <string.h>     /* memcpy */


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
#   include <stdlib.h>  /* _byteswap_ulong */
#   include <intrin.h>  /* _byteswap_* */
#endif

/* MEM_STATIC : qualifier applied to every helper in this header.
 * Selects the best available "static inline" spelling for the compiler;
 * __attribute__((unused)) silences unused-function warnings on gcc/clang. */
#if defined(__GNUC__)
#  define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

/* __has_builtin : fall back to "not supported" on compilers without it,
 * so the macro can be used freely in #if expressions below. */
#ifndef __has_builtin
#  define __has_builtin(x) 0  /* compat. with non-clang compilers */
#endif
/* code only tested on 32 and 64 bits systems */
/* MEM_STATIC_ASSERT : compile-time check usable inside a function body.
 * A false condition produces a division by zero in a constant expression,
 * which the compiler must reject (C90-compatible stand-in for _Static_assert). */
#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
/* MEM_check() : refuses to compile unless size_t is exactly 4 or 8 bytes wide */
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
/* detects whether we are being compiled under msan */
#if defined (__has_feature)
#  if __has_feature(memory_sanitizer)
#    define MEMORY_SANITIZER 1
#  endif
#endif

#if defined (MEMORY_SANITIZER)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

#include <stdint.h> /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif
/* detects whether we are being compiled under asan :
 * clang exposes __has_feature(address_sanitizer), gcc defines __SANITIZE_ADDRESS__ */
#if defined (__has_feature)
#  if __has_feature(address_sanitizer)
#    define ADDRESS_SANITIZER 1
#  endif
#elif defined(__SANITIZE_ADDRESS__)
#  define ADDRESS_SANITIZER 1
#endif

#if defined (ADDRESS_SANITIZER)
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 * unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 * poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif
/*-**************************************************************
*  Basic Types
*****************************************************************/
/* Fixed-width aliases : take them from <stdint.h> when C99/C++ guarantees it,
 * otherwise map onto C90 base types after verifying their exact widths. */
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef  int16_t S16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
  typedef  int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
#  error "this implementation requires char to be exactly 8-bit type"
#endif
  typedef unsigned char       BYTE;
#if USHRT_MAX != 65535
#  error "this implementation requires short to be exactly 16-bit type"
#endif
  typedef unsigned short      U16;
  typedef   signed short      S16;
#if UINT_MAX != 4294967295
#  error "this implementation requires int to be exactly 32-bit type"
#endif
  typedef unsigned int        U32;
  typedef   signed int        S32;
/* note : there are no limits defined for long long type in C90.
 * limits exist in C99, however, in such case, <stdint.h> is preferred */
  typedef unsigned long long  U64;
  typedef   signed long long  S64;
#endif
/*-**************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The below switch allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on compiler extensions (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets depending on alignment.
 *            In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
  /* ARMv6 with gcc : direct access is the only way to get acceptable codegen */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define MEM_FORCE_MEMORY_ACCESS 2
  /* compilers known to support the packed-struct extension */
#  elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
#    define MEM_FORCE_MEMORY_ACCESS 1
#  endif
#endif
  161. MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
  162. MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
  163. MEM_STATIC unsigned MEM_isLittleEndian(void)
  164. {
  165. const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
  166. return one.c[0];
  167. }
/* Unaligned read/write primitives : exactly one of the three implementation
 * families below is compiled, selected by MEM_FORCE_MEMORY_ACCESS (see above). */
#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* Method 2 : plain pointer dereference.
 * violates C standard, by lying on structure alignment.
   Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* Method 1 : access through 1-byte-packed structs, telling the compiler
 * the pointee may be unaligned so it emits safe (yet usually fast) code.
 * __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
    __pragma( pack(push, 1) )
    typedef struct { U16 v; } unalign16;
    typedef struct { U32 v; } unalign32;
    typedef struct { U64 v; } unalign64;
    typedef struct { size_t v; } unalignArch;
    __pragma( pack(pop) )
#else
    typedef struct { U16 v; } __attribute__((packed)) unalign16;
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    typedef struct { U64 v; } __attribute__((packed)) unalign64;
    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }

#else

/* Method 0 (default) : memcpy through a local temporary.
 * default method, safe and standard.
   can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */
  233. MEM_STATIC U32 MEM_swap32(U32 in)
  234. {
  235. #if defined(_MSC_VER) /* Visual Studio */
  236. return _byteswap_ulong(in);
  237. #elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  238. || (defined(__clang__) && __has_builtin(__builtin_bswap32))
  239. return __builtin_bswap32(in);
  240. #else
  241. return ((in << 24) & 0xff000000 ) |
  242. ((in << 8) & 0x00ff0000 ) |
  243. ((in >> 8) & 0x0000ff00 ) |
  244. ((in >> 24) & 0x000000ff );
  245. #endif
  246. }
  247. MEM_STATIC U64 MEM_swap64(U64 in)
  248. {
  249. #if defined(_MSC_VER) /* Visual Studio */
  250. return _byteswap_uint64(in);
  251. #elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  252. || (defined(__clang__) && __has_builtin(__builtin_bswap64))
  253. return __builtin_bswap64(in);
  254. #else
  255. return ((in << 56) & 0xff00000000000000ULL) |
  256. ((in << 40) & 0x00ff000000000000ULL) |
  257. ((in << 24) & 0x0000ff0000000000ULL) |
  258. ((in << 8) & 0x000000ff00000000ULL) |
  259. ((in >> 8) & 0x00000000ff000000ULL) |
  260. ((in >> 24) & 0x0000000000ff0000ULL) |
  261. ((in >> 40) & 0x000000000000ff00ULL) |
  262. ((in >> 56) & 0x00000000000000ffULL);
  263. #endif
  264. }
  265. MEM_STATIC size_t MEM_swapST(size_t in)
  266. {
  267. if (MEM_32bits())
  268. return (size_t)MEM_swap32((U32)in);
  269. else
  270. return (size_t)MEM_swap64((U64)in);
  271. }
  272. /*=== Little endian r/w ===*/
  273. MEM_STATIC U16 MEM_readLE16(const void* memPtr)
  274. {
  275. if (MEM_isLittleEndian())
  276. return MEM_read16(memPtr);
  277. else {
  278. const BYTE* p = (const BYTE*)memPtr;
  279. return (U16)(p[0] + (p[1]<<8));
  280. }
  281. }
  282. MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
  283. {
  284. if (MEM_isLittleEndian()) {
  285. MEM_write16(memPtr, val);
  286. } else {
  287. BYTE* p = (BYTE*)memPtr;
  288. p[0] = (BYTE)val;
  289. p[1] = (BYTE)(val>>8);
  290. }
  291. }
  292. MEM_STATIC U32 MEM_readLE24(const void* memPtr)
  293. {
  294. return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
  295. }
  296. MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
  297. {
  298. MEM_writeLE16(memPtr, (U16)val);
  299. ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
  300. }
  301. MEM_STATIC U32 MEM_readLE32(const void* memPtr)
  302. {
  303. if (MEM_isLittleEndian())
  304. return MEM_read32(memPtr);
  305. else
  306. return MEM_swap32(MEM_read32(memPtr));
  307. }
  308. MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
  309. {
  310. if (MEM_isLittleEndian())
  311. MEM_write32(memPtr, val32);
  312. else
  313. MEM_write32(memPtr, MEM_swap32(val32));
  314. }
  315. MEM_STATIC U64 MEM_readLE64(const void* memPtr)
  316. {
  317. if (MEM_isLittleEndian())
  318. return MEM_read64(memPtr);
  319. else
  320. return MEM_swap64(MEM_read64(memPtr));
  321. }
  322. MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
  323. {
  324. if (MEM_isLittleEndian())
  325. MEM_write64(memPtr, val64);
  326. else
  327. MEM_write64(memPtr, MEM_swap64(val64));
  328. }
  329. MEM_STATIC size_t MEM_readLEST(const void* memPtr)
  330. {
  331. if (MEM_32bits())
  332. return (size_t)MEM_readLE32(memPtr);
  333. else
  334. return (size_t)MEM_readLE64(memPtr);
  335. }
  336. MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
  337. {
  338. if (MEM_32bits())
  339. MEM_writeLE32(memPtr, (U32)val);
  340. else
  341. MEM_writeLE64(memPtr, (U64)val);
  342. }
  343. /*=== Big endian r/w ===*/
  344. MEM_STATIC U32 MEM_readBE32(const void* memPtr)
  345. {
  346. if (MEM_isLittleEndian())
  347. return MEM_swap32(MEM_read32(memPtr));
  348. else
  349. return MEM_read32(memPtr);
  350. }
  351. MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
  352. {
  353. if (MEM_isLittleEndian())
  354. MEM_write32(memPtr, MEM_swap32(val32));
  355. else
  356. MEM_write32(memPtr, val32);
  357. }
  358. MEM_STATIC U64 MEM_readBE64(const void* memPtr)
  359. {
  360. if (MEM_isLittleEndian())
  361. return MEM_swap64(MEM_read64(memPtr));
  362. else
  363. return MEM_read64(memPtr);
  364. }
  365. MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
  366. {
  367. if (MEM_isLittleEndian())
  368. MEM_write64(memPtr, MEM_swap64(val64));
  369. else
  370. MEM_write64(memPtr, val64);
  371. }
  372. MEM_STATIC size_t MEM_readBEST(const void* memPtr)
  373. {
  374. if (MEM_32bits())
  375. return (size_t)MEM_readBE32(memPtr);
  376. else
  377. return (size_t)MEM_readBE64(memPtr);
  378. }
  379. MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
  380. {
  381. if (MEM_32bits())
  382. MEM_writeBE32(memPtr, (U32)val);
  383. else
  384. MEM_writeBE64(memPtr, (U64)val);
  385. }
  386. #if defined (__cplusplus)
  387. }
  388. #endif
  389. #endif /* MEM_H_MODULE */