/* x86.h - x86 CPU feature detection and low-level timing/FPU helpers. */
  1. /*
  2. * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #ifndef VPX_PORTS_X86_H_
  11. #define VPX_PORTS_X86_H_
  12. #include <stdlib.h>
  13. #if defined(_MSC_VER)
  14. #include <intrin.h> /* For __cpuidex, __rdtsc */
  15. #endif
  16. #include "vpx_config.h"
  17. #include "vpx/vpx_integer.h"
  18. #ifdef __cplusplus
  19. extern "C" {
  20. #endif
  21. typedef enum {
  22. VPX_CPU_UNKNOWN = -1,
  23. VPX_CPU_AMD,
  24. VPX_CPU_AMD_OLD,
  25. VPX_CPU_CENTAUR,
  26. VPX_CPU_CYRIX,
  27. VPX_CPU_INTEL,
  28. VPX_CPU_NEXGEN,
  29. VPX_CPU_NSC,
  30. VPX_CPU_RISE,
  31. VPX_CPU_SIS,
  32. VPX_CPU_TRANSMETA,
  33. VPX_CPU_TRANSMETA_OLD,
  34. VPX_CPU_UMC,
  35. VPX_CPU_VIA,
  36. VPX_CPU_LAST
  37. } vpx_cpu_t;
  38. #if defined(__GNUC__) && __GNUC__ || defined(__ANDROID__)
  39. #if ARCH_X86_64
  40. #define cpuid(func, func2, ax, bx, cx, dx)\
  41. __asm__ __volatile__ (\
  42. "cpuid \n\t" \
  43. : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \
  44. : "a" (func), "c" (func2));
  45. #else
  46. #define cpuid(func, func2, ax, bx, cx, dx)\
  47. __asm__ __volatile__ (\
  48. "mov %%ebx, %%edi \n\t" \
  49. "cpuid \n\t" \
  50. "xchg %%edi, %%ebx \n\t" \
  51. : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
  52. : "a" (func), "c" (func2));
  53. #endif
  54. #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__*/
  55. #if ARCH_X86_64
  56. #define cpuid(func, func2, ax, bx, cx, dx)\
  57. asm volatile (\
  58. "xchg %rsi, %rbx \n\t" \
  59. "cpuid \n\t" \
  60. "movl %ebx, %edi \n\t" \
  61. "xchg %rsi, %rbx \n\t" \
  62. : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
  63. : "a" (func), "c" (func2));
  64. #else
  65. #define cpuid(func, func2, ax, bx, cx, dx)\
  66. asm volatile (\
  67. "pushl %ebx \n\t" \
  68. "cpuid \n\t" \
  69. "movl %ebx, %edi \n\t" \
  70. "popl %ebx \n\t" \
  71. : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
  72. : "a" (func), "c" (func2));
  73. #endif
  74. #else /* end __SUNPRO__ */
  75. #if ARCH_X86_64
  76. #if defined(_MSC_VER) && _MSC_VER > 1500
  77. #define cpuid(func, func2, a, b, c, d) do {\
  78. int regs[4];\
  79. __cpuidex(regs, func, func2); \
  80. a = regs[0]; b = regs[1]; c = regs[2]; d = regs[3];\
  81. } while(0)
  82. #else
  83. #define cpuid(func, func2, a, b, c, d) do {\
  84. int regs[4];\
  85. __cpuid(regs, func); \
  86. a = regs[0]; b = regs[1]; c = regs[2]; d = regs[3];\
  87. } while (0)
  88. #endif
  89. #else
  90. #define cpuid(func, func2, a, b, c, d)\
  91. __asm mov eax, func\
  92. __asm mov ecx, func2\
  93. __asm cpuid\
  94. __asm mov a, eax\
  95. __asm mov b, ebx\
  96. __asm mov c, ecx\
  97. __asm mov d, edx
  98. #endif
  99. #endif /* end others */
  100. // NaCl has no support for xgetbv or the raw opcode.
  101. #if !defined(__native_client__) && (defined(__i386__) || defined(__x86_64__))
  102. static INLINE uint64_t xgetbv(void) {
  103. const uint32_t ecx = 0;
  104. uint32_t eax, edx;
  105. // Use the raw opcode for xgetbv for compatibility with older toolchains.
  106. __asm__ volatile (
  107. ".byte 0x0f, 0x01, 0xd0\n"
  108. : "=a"(eax), "=d"(edx) : "c" (ecx));
  109. return ((uint64_t)edx << 32) | eax;
  110. }
  111. #elif (defined(_M_X64) || defined(_M_IX86)) && \
  112. defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219 // >= VS2010 SP1
  113. #include <immintrin.h>
  114. #define xgetbv() _xgetbv(0)
  115. #elif defined(_MSC_VER) && defined(_M_IX86)
  116. static INLINE uint64_t xgetbv(void) {
  117. uint32_t eax_, edx_;
  118. __asm {
  119. xor ecx, ecx // ecx = 0
  120. // Use the raw opcode for xgetbv for compatibility with older toolchains.
  121. __asm _emit 0x0f __asm _emit 0x01 __asm _emit 0xd0
  122. mov eax_, eax
  123. mov edx_, edx
  124. }
  125. return ((uint64_t)edx_ << 32) | eax_;
  126. }
  127. #else
  128. #define xgetbv() 0U // no AVX for older x64 or unrecognized toolchains.
  129. #endif
  130. #if defined(_MSC_VER) && _MSC_VER >= 1700
  131. #include <windows.h>
  132. #if WINAPI_FAMILY_PARTITION(WINAPI_FAMILY_APP)
  133. #define getenv(x) NULL
  134. #endif
  135. #endif
  136. #define HAS_MMX 0x01
  137. #define HAS_SSE 0x02
  138. #define HAS_SSE2 0x04
  139. #define HAS_SSE3 0x08
  140. #define HAS_SSSE3 0x10
  141. #define HAS_SSE4_1 0x20
  142. #define HAS_AVX 0x40
  143. #define HAS_AVX2 0x80
  144. #ifndef BIT
  145. #define BIT(n) (1<<n)
  146. #endif
  147. static INLINE int
  148. x86_simd_caps(void) {
  149. unsigned int flags = 0;
  150. unsigned int mask = ~0;
  151. unsigned int max_cpuid_val, reg_eax, reg_ebx, reg_ecx, reg_edx;
  152. char *env;
  153. (void)reg_ebx;
  154. /* See if the CPU capabilities are being overridden by the environment */
  155. env = getenv("VPX_SIMD_CAPS");
  156. if (env && *env)
  157. return (int)strtol(env, NULL, 0);
  158. env = getenv("VPX_SIMD_CAPS_MASK");
  159. if (env && *env)
  160. mask = (unsigned int)strtoul(env, NULL, 0);
  161. /* Ensure that the CPUID instruction supports extended features */
  162. cpuid(0, 0, max_cpuid_val, reg_ebx, reg_ecx, reg_edx);
  163. if (max_cpuid_val < 1)
  164. return 0;
  165. /* Get the standard feature flags */
  166. cpuid(1, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
  167. if (reg_edx & BIT(23)) flags |= HAS_MMX;
  168. if (reg_edx & BIT(25)) flags |= HAS_SSE; /* aka xmm */
  169. if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */
  170. if (reg_ecx & BIT(0)) flags |= HAS_SSE3;
  171. if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;
  172. if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;
  173. // bits 27 (OSXSAVE) & 28 (256-bit AVX)
  174. if ((reg_ecx & (BIT(27) | BIT(28))) == (BIT(27) | BIT(28))) {
  175. if ((xgetbv() & 0x6) == 0x6) {
  176. flags |= HAS_AVX;
  177. if (max_cpuid_val >= 7) {
  178. /* Get the leaf 7 feature flags. Needed to check for AVX2 support */
  179. cpuid(7, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
  180. if (reg_ebx & BIT(5)) flags |= HAS_AVX2;
  181. }
  182. }
  183. }
  184. return flags & mask;
  185. }
  186. // Note:
  187. // 32-bit CPU cycle counter is light-weighted for most function performance
  188. // measurement. For large function (CPU time > a couple of seconds), 64-bit
  189. // counter should be used.
  190. // 32-bit CPU cycle counter
  191. static INLINE unsigned int
  192. x86_readtsc(void) {
  193. #if defined(__GNUC__) && __GNUC__
  194. unsigned int tsc;
  195. __asm__ __volatile__("rdtsc\n\t":"=a"(tsc):);
  196. return tsc;
  197. #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  198. unsigned int tsc;
  199. asm volatile("rdtsc\n\t":"=a"(tsc):);
  200. return tsc;
  201. #else
  202. #if ARCH_X86_64
  203. return (unsigned int)__rdtsc();
  204. #else
  205. __asm rdtsc;
  206. #endif
  207. #endif
  208. }
  209. // 64-bit CPU cycle counter
  210. static INLINE uint64_t
  211. x86_readtsc64(void) {
  212. #if defined(__GNUC__) && __GNUC__
  213. uint32_t hi, lo;
  214. __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
  215. return ((uint64_t)hi << 32) | lo;
  216. #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  217. uint_t hi, lo;
  218. asm volatile("rdtsc\n\t" : "=a"(lo), "=d"(hi));
  219. return ((uint64_t)hi << 32) | lo;
  220. #else
  221. #if ARCH_X86_64
  222. return (uint64_t)__rdtsc();
  223. #else
  224. __asm rdtsc;
  225. #endif
  226. #endif
  227. }
  228. #if defined(__GNUC__) && __GNUC__
  229. #define x86_pause_hint()\
  230. __asm__ __volatile__ ("pause \n\t")
  231. #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  232. #define x86_pause_hint()\
  233. asm volatile ("pause \n\t")
  234. #else
  235. #if ARCH_X86_64
  236. #define x86_pause_hint()\
  237. _mm_pause();
  238. #else
  239. #define x86_pause_hint()\
  240. __asm pause
  241. #endif
  242. #endif
  243. #if defined(__GNUC__) && __GNUC__
  244. static void
  245. x87_set_control_word(unsigned short mode) {
  246. __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
  247. }
  248. static unsigned short
  249. x87_get_control_word(void) {
  250. unsigned short mode;
  251. __asm__ __volatile__("fstcw %0\n\t":"=m"(*&mode):);
  252. return mode;
  253. }
  254. #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  255. static void
  256. x87_set_control_word(unsigned short mode) {
  257. asm volatile("fldcw %0" : : "m"(*&mode));
  258. }
  259. static unsigned short
  260. x87_get_control_word(void) {
  261. unsigned short mode;
  262. asm volatile("fstcw %0\n\t":"=m"(*&mode):);
  263. return mode;
  264. }
  265. #elif ARCH_X86_64
  266. /* No fldcw intrinsics on Windows x64, punt to external asm */
  267. extern void vpx_winx64_fldcw(unsigned short mode);
  268. extern unsigned short vpx_winx64_fstcw(void);
  269. #define x87_set_control_word vpx_winx64_fldcw
  270. #define x87_get_control_word vpx_winx64_fstcw
  271. #else
  272. static void
  273. x87_set_control_word(unsigned short mode) {
  274. __asm { fldcw mode }
  275. }
  276. static unsigned short
  277. x87_get_control_word(void) {
  278. unsigned short mode;
  279. __asm { fstcw mode }
  280. return mode;
  281. }
  282. #endif
  283. static INLINE unsigned int
  284. x87_set_double_precision(void) {
  285. unsigned int mode = x87_get_control_word();
  286. x87_set_control_word((mode&~0x300) | 0x200);
  287. return mode;
  288. }
  289. extern void vpx_reset_mmx_state(void);
  290. #ifdef __cplusplus
  291. } // extern "C"
  292. #endif
  293. #endif // VPX_PORTS_X86_H_