/*
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */
#ifndef _UAPI_ASM_IA64_GCC_INTRIN_H
#define _UAPI_ASM_IA64_GCC_INTRIN_H

#include <linux/types.h>
#include <linux/compiler.h>
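
/*
 * GCC inline-asm versions of the ia64 intrinsics.  Each macro wraps a
 * single machine instruction (or a short compile-time dispatch over
 * one); callers normally reach these through asm/intrinsics.h rather
 * than including this header directly.
 */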

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

#define ia64_stop()	asm volatile (";;"::)

#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

#define ia64_flushrs()	asm volatile ("flushrs;;":::"memory")

#define ia64_loadrs()	asm volatile ("loadrs;;":::"memory")

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

/*
 * ia64_native_getreg(_IA64_REG_TP) below reads ia64_r13, a global
 * register variable bound to r13 (the thread pointer).  The includer
 * is expected to declare it, e.g.:
 *
 *	register unsigned long ia64_r13 asm ("r13") __used;
 */

#define ia64_native_setreg(regnum, val) \
({ \
	switch (regnum) { \
	case _IA64_REG_PSR_L: \
		asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
		break; \
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
		asm volatile ("mov ar%0=%1" :: \
			      "i" (regnum - _IA64_REG_AR_KR0), \
			      "r"(val) : "memory"); \
		break; \
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
		asm volatile ("mov cr%0=%1" :: \
			      "i" (regnum - _IA64_REG_CR_DCR), \
			      "r"(val) : "memory"); \
		break; \
	case _IA64_REG_SP: \
		asm volatile ("mov r12=%0" :: \
			      "r"(val) : "memory"); \
		break; \
	case _IA64_REG_GP: \
		asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
		break; \
	default: \
		ia64_bad_param_for_setreg(); \
		break; \
	} \
})

#define ia64_native_getreg(regnum) \
({ \
	__u64 ia64_intri_res; \
	switch (regnum) { \
	case _IA64_REG_GP: \
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_IP: \
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_PSR: \
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_TP:	/* for current() */ \
		ia64_intri_res = ia64_r13; \
		break; \
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
			      : "i"(regnum - _IA64_REG_AR_KR0)); \
		break; \
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
			      : "i" (regnum - _IA64_REG_CR_DCR)); \
		break; \
	case _IA64_REG_SP: \
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
		break; \
	default: \
		ia64_bad_param_for_getreg(); \
		break; \
	} \
	ia64_intri_res; \
})
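
/*
 * Both dispatchers expect regnum to be a compile-time constant from
 * asm/intrinsics.h: the ar/cr ranges feed an "i" constraint, and any
 * value outside the known set reaches ia64_bad_param_for_setreg() /
 * ia64_bad_param_for_getreg(), which are deliberately never defined,
 * so misuse shows up as a link-time error.  Sketch:
 *
 *	unsigned long sp = ia64_native_getreg(_IA64_REG_SP);
 *	ia64_native_setreg(_IA64_REG_AR_KR0, val);
 */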

#define ia64_hint_pause 0

#define ia64_hint(mode) \
({ \
	switch (mode) { \
	case ia64_hint_pause: \
		asm volatile ("hint @pause" ::: "memory"); \
		break; \
	} \
})
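
/*
 * hint @pause is the architected spin-wait hint; it is what
 * cpu_relax() boils down to on ia64.
 */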

/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix 8
#define ia64_mux1_shuf 9
#define ia64_mux1_alt 10
#define ia64_mux1_rev 11

#define ia64_mux1(x, mode) \
({ \
	__u64 ia64_intri_res; \
	switch (mode) { \
	case ia64_mux1_brcst: \
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_mix: \
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_shuf: \
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_alt: \
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_rev: \
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	} \
	ia64_intri_res; \
})
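
/*
 * Example (sketch): mux1 with @rev reverses the eight bytes of a
 * register, which is how a 64-bit byte swap is done on ia64:
 *
 *	__u64 swapped = ia64_mux1(x, ia64_mux1_rev);
 */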

#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x) __builtin_popcountl(x)
#else
# define ia64_popcnt(x) \
({ \
	__u64 ia64_intri_res; \
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
	ia64_intri_res; \
})
#endif
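
/*
 * From GCC 3.4 on, __builtin_popcountl is trusted to emit the native
 * popcnt instruction, presumably why the version cutoff sits there.
 */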

#define ia64_getf_exp(x) \
({ \
	long ia64_intri_res; \
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
	ia64_intri_res; \
})

#define ia64_shrp(a, b, count) \
({ \
	__u64 ia64_intri_res; \
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
	ia64_intri_res; \
})
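
/*
 * shrp shifts the 128-bit concatenation a:b right by count and keeps
 * the low 64 bits; count must be an immediate.  With a == b this is a
 * 64-bit rotate right (sketch):
 *
 *	__u64 ror8 = ia64_shrp(x, x, 8);
 */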

#define ia64_ldfs(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfs %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldfd(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfd %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldfe(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfe %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldf8(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldf8 %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldf_fill(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldf.fill %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_st4_rel_nta(m, val) \
({ \
	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val)); \
})

#define ia64_stfs(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfd(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfe(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf8(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf_spill(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
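
/*
 * The ldf and stf macros above bind a local to one specific
 * architectural FP register ("f"#regnum), so regnum must be a literal
 * constant.  ldf.fill and stf.spill move the full register image,
 * NaTVal included, and are the pair used for FP context save/restore.
 */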

#define ia64_fetchadd4_acq(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd4.acq %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
	ia64_intri_res; \
})

#define ia64_fetchadd4_rel(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd4.rel %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
	ia64_intri_res; \
})

#define ia64_fetchadd8_acq(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd8.acq %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
	ia64_intri_res; \
})

#define ia64_fetchadd8_rel(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd8.rel %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
	ia64_intri_res; \
})
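
/*
 * fetchadd returns the value the location held before the add, and
 * the architecture only accepts the immediates -16, -8, -4, -1, 1, 4,
 * 8, 16 for inc.  Sketch of a counter bump with acquire semantics:
 *
 *	__u64 old = ia64_fetchadd4_acq(&counter, 1);
 */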

#define ia64_xchg1(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg1 %0=[%1],%2" \
		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg2(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
		      : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg4(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
		      : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg8(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
		      : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})
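
/*
 * xchgN unconditionally swaps the N-byte value at ptr with x and
 * returns the old contents zero-extended to 64 bits; on ia64, xchg
 * carries implicit acquire semantics.
 */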

#define ia64_cmpxchg1_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg1_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg2_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg2_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg4_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg4_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg8_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg8_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv" \
		      : "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})
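
/*
 * The cmpxchg protocol: the comparand is parked in ar.ccv first, then
 * cmpxchgN.acq/.rel stores new only if the N bytes at ptr equal
 * ar.ccv, and always returns what it found there.  Typical retry loop
 * (sketch):
 *
 *	do {
 *		old = *p;
 *	} while (ia64_cmpxchg8_acq(p, old + 1, old) != old);
 */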

#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala()	asm volatile ("invala" ::: "memory")

#define ia64_native_thash(addr) \
({ \
	unsigned long ia64_intri_res; \
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
	ia64_intri_res; \
})

#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
/* No trailing semicolon: keep this usable in if/else like ia64_srlz_i(). */
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")
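
/*
 * .serialize.data / .serialize.instruction are assembler annotations
 * (presumably recording serialization points for dependency-violation
 * checking) accepted by newer assemblers; on older toolchains the
 * macros collapse to nothing.
 */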
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data")
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction")
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

/* Trailing semicolon dropped; call sites already supply their own. */
#define ia64_nop(x)	asm volatile ("nop %0" :: "i"(x))
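
/*
 * TLB management: itc.i/itc.d insert a translation into the
 * instruction or data translation cache, itr.i/itr.d pin one in a
 * translation register slot, and tpa translates a virtual address to
 * physical.
 */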
#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")

#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr) \
({ \
	unsigned long ia64_pa; \
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
	ia64_pa; \
})
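
/*
 * Movers for the indexed register files: dbr/ibr are data/instruction
 * breakpoint registers, pkr protection-key registers, pmc/pmd the
 * performance-monitor configuration and data registers, rr the region
 * registers, and cpuid the processor identification registers.
 */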
#define __ia64_set_dbr(index, val) \
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val) \
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val) \
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val) \
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val) \
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

/* Stray trailing semicolon dropped, matching the other setters. */
#define ia64_native_set_rr(index, val) \
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_native_get_cpuid(index) \
({ \
	unsigned long ia64_intri_res; \
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
	ia64_intri_res; \
})

#define __ia64_get_dbr(index) \
({ \
	unsigned long ia64_intri_res; \
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_ibr(index) \
({ \
	unsigned long ia64_intri_res; \
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_pkr(index) \
({ \
	unsigned long ia64_intri_res; \
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_pmc(index) \
({ \
	unsigned long ia64_intri_res; \
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_native_get_pmd(index) \
({ \
	unsigned long ia64_intri_res; \
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_native_get_rr(index) \
({ \
	unsigned long ia64_intri_res; \
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
	ia64_intri_res; \
})

#define ia64_native_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")

#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_native_ssm(mask)	asm volatile ("ssm %0" :: "i"((mask)) : "memory")
#define ia64_native_rsm(mask)	asm volatile ("rsm %0" :: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0" :: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0" :: "i"((mask)) : "memory")
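
/*
 * Purges: ptc.e flushes the local translation cache, ptc.l purges a
 * local range, ptc.ga broadcasts a global purge (with ALAT shoot-down)
 * to other processors, and ptr.i/ptr.d purge translation registers.
 */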
#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_native_ptcga(addr, size) \
do { \
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
	ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptcl(addr, size) \
do { \
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
	ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptri(addr, size) \
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size) \
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ttag(addr) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
	ia64_intri_res; \
})

/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
#define ia64_lfhint_none 0
#define ia64_lfhint_nt1 1
#define ia64_lfhint_nt2 2
#define ia64_lfhint_nta 3
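
/*
 * lfetch prefetches the cache line containing the address; the hint
 * selects how temporally local the data is expected to be (.nta =
 * non-temporal at all levels).  The .excl forms hint intent to write,
 * and the .fault forms may raise faults just like an ordinary load.
 */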
#define ia64_lfetch(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.nt1 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.nt2 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.nta [%0]" :: "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_excl(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_fault(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.fault [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.fault.nt1 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.fault.nt2 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.fault.nta [%0]" :: "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_fault_excl(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
		break; \
	} \
})
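
/*
 * Restores the interrupt state saved in x: p6/p7 are set from the
 * comparison with r0, so exactly one of ssm psr.i (x != 0, re-enable)
 * or rsm psr.i (x == 0, keep disabled) executes, followed by a srlz.d
 * under p6 to make the enable take effect.
 */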
#define ia64_native_intrin_local_irq_restore(x) \
do { \
	asm volatile (";;   cmp.ne p6,p7=%0,r0;;" \
		      "(p6) ssm psr.i;" \
		      "(p7) rsm psr.i;;" \
		      "(p6) srlz.d" \
		      :: "r"((x)) : "p6", "p7", "memory"); \
} while (0)

#endif /* _UAPI_ASM_IA64_GCC_INTRIN_H */