plpar_wrappers.h

#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
#define _ASM_POWERPC_PLPAR_WRAPPERS_H

#include <linux/string.h>
#include <linux/irqflags.h>

#include <asm/hvcall.h>
#include <asm/paca.h>
#include <asm/page.h>

/* Get state of physical CPU from query_cpu_stopped */
int smp_query_cpu_stopped(unsigned int pcpu);
#define QCSS_STOPPED 0
#define QCSS_STOPPING 1
#define QCSS_NOT_STOPPED 2
#define QCSS_HARDWARE_ERROR -1
#define QCSS_HARDWARE_BUSY -2

static inline long poll_pending(void)
{
	return plpar_hcall_norets(H_POLL_PENDING);
}

static inline u8 get_cede_latency_hint(void)
{
	return get_lppaca()->cede_latency_hint;
}

static inline void set_cede_latency_hint(u8 latency_hint)
{
	get_lppaca()->cede_latency_hint = latency_hint;
}

static inline long cede_processor(void)
{
	return plpar_hcall_norets(H_CEDE);
}

static inline long extended_cede_processor(unsigned long latency_hint)
{
	long rc;
	u8 old_latency_hint = get_cede_latency_hint();

	set_cede_latency_hint(latency_hint);
	rc = cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Ensure that H_CEDE returns with IRQs on */
	if (WARN_ON(!(mfmsr() & MSR_EE)))
		__hard_irq_enable();
#endif
	set_cede_latency_hint(old_latency_hint);

	return rc;
}
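/*
 * Usage sketch (illustrative, not part of this header): an idle loop might
 * cede the vCPU to the hypervisor with a wake-up latency hint. The hint
 * value 1, the loop structure, and need_resched() (linux/sched.h) are
 * assumptions for the example, not mandated by this API.
 */
#if 0
static void example_idle_cede(void)
{
	/* Latency hint 1 is an example value; 0 requests the default. */
	while (!need_resched())
		extended_cede_processor(1);
}
#endif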
static inline long vpa_call(unsigned long flags, unsigned long cpu,
		unsigned long vpa)
{
	flags = flags << H_VPA_FUNC_SHIFT;

	return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}

static inline long unregister_vpa(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
}

static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_VPA, cpu, vpa);
}

static inline long unregister_slb_shadow(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
}

static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_SLB, cpu, vpa);
}

static inline long unregister_dtl(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
}

static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}
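/*
 * Usage sketch (illustrative): registering a dispatch trace log buffer for
 * the current CPU. The hypervisor expects the buffer's real (physical)
 * address; the error handling shown is an assumption for the example.
 */
#if 0
static int example_register_dtl(struct dtl_entry *buf)
{
	long rc;

	rc = register_dtl(hard_smp_processor_id(), __pa(buf));
	if (rc)
		pr_warn("DTL registration failed, rc=%ld\n", rc);

	return rc ? -EIO : 0;
}
#endif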
static inline long plpar_page_set_loaned(unsigned long vpa)
{
	unsigned long cmo_page_sz = cmo_get_page_size();
	long rc = 0;
	int i;

	/* Loan the page to the hypervisor, one CMO page at a time. */
	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);

	/* On failure, re-activate whatever was already loaned. */
	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
				   vpa + i - cmo_page_sz, 0);

	return rc;
}

static inline long plpar_page_set_active(unsigned long vpa)
{
	unsigned long cmo_page_sz = cmo_get_page_size();
	long rc = 0;
	int i;

	/* Return the page to the partition, one CMO page at a time. */
	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);

	/* On failure, put the already-activated pieces back on loan. */
	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
				   vpa + i - cmo_page_sz, 0);

	return rc;
}
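/*
 * Usage sketch (illustrative): a Cooperative Memory Overcommit balloon
 * driver loans a page to the hypervisor and later takes it back. The
 * page allocation shown is an assumption for the example.
 */
#if 0
static void example_cmo_balloon_cycle(void)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);

	if (!addr)
		return;

	if (plpar_page_set_loaned(__pa(addr)) == 0) {
		/* ... page is on loan to the hypervisor ... */
		plpar_page_set_active(__pa(addr));
	}
	free_page(addr);
}
#endif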
extern void vpa_init(int cpu);

static inline long plpar_pte_enter(unsigned long flags,
		unsigned long hpte_group, unsigned long hpte_v,
		unsigned long hpte_r, unsigned long *slot)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);

	*slot = retbuf[0];

	return rc;
}

static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
		unsigned long avpn, unsigned long *old_pteh_ret,
		unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
		unsigned long avpn, unsigned long *old_pteh_ret,
		unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_READ, retbuf, flags, ptex);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}
/*
 * ptes must point to a buffer of 8 * sizeof(unsigned long) bytes,
 * i.e. four (hpte_v, hpte_r) pairs.
 */
static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
		unsigned long *ptes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);

	memcpy(ptes, retbuf, 8 * sizeof(unsigned long));

	return rc;
}
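/*
 * Usage sketch (illustrative): reading four consecutive HPTEs. Rounding the
 * PTE index down to a multiple of 4 and passing flags = 0 are assumptions
 * for the example.
 */
#if 0
static void example_read_hpte_quad(unsigned long ptex)
{
	unsigned long ptes[8];	/* four (hpte_v, hpte_r) pairs */
	long rc;

	rc = plpar_pte_read_4(0, ptex & ~3UL, ptes);
	if (rc == H_SUCCESS)
		pr_info("hpte_v[0]=%lx hpte_r[0]=%lx\n", ptes[0], ptes[1]);
}
#endif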
/*
 * plpar_pte_read_4_raw can be called in real mode.
 * ptes must point to a buffer of 8 * sizeof(unsigned long) bytes.
 */
static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
		unsigned long *ptes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);

	memcpy(ptes, retbuf, 8 * sizeof(unsigned long));

	return rc;
}
static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
		unsigned long avpn)
{
	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
}

static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
		unsigned long *tce_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);

	*tce_ret = retbuf[0];

	return rc;
}

static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
		unsigned long tceval)
{
	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
}

static inline long plpar_tce_put_indirect(unsigned long liobn,
		unsigned long ioba, unsigned long page, unsigned long count)
{
	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
}

static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
		unsigned long tceval, unsigned long count)
{
	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
}
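/*
 * Usage sketch (illustrative): mapping one page into an I/O window with
 * H_PUT_TCE. The TCE_PCI_* permission bits come from asm/tce.h; the
 * liobn/ioba parameters are assumptions supplied by the caller.
 */
#if 0
static long example_map_tce(unsigned long liobn, unsigned long ioba,
		void *vaddr)
{
	unsigned long tce = __pa(vaddr) | TCE_PCI_READ | TCE_PCI_WRITE;

	return plpar_tce_put(liobn, ioba, tce);
}
#endif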
/* Set various resource mode parameters */
static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
		unsigned long value1, unsigned long value2)
{
	return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
}

/*
 * Enable relocation on exceptions on this partition
 *
 * Note: this call has a partition-wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_reloc_on_exceptions(void)
{
	/* mflags = 3: Exceptions at 0xC000000000004000 */
	return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}
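/*
 * Usage sketch (illustrative): the retry protocol described above, using
 * H_IS_LONG_BUSY() and get_longbusy_msecs() from asm/hvcall.h. The choice
 * of mdelay() (linux/delay.h) is an assumption for the example; a sleeping
 * context could use msleep() instead.
 */
#if 0
static long example_enable_reloc_retry(void)
{
	long rc;

	do {
		rc = enable_reloc_on_exceptions();
		if (H_IS_LONG_BUSY(rc))
			mdelay(get_longbusy_msecs(rc));
	} while (H_IS_LONG_BUSY(rc));

	return rc;
}
#endif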
/*
 * Disable relocation on exceptions on this partition
 *
 * Note: this call has a partition-wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long disable_reloc_on_exceptions(void)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}
/*
 * Take exceptions in big endian mode on this partition
 *
 * Note: this call has a partition-wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_big_endian_exceptions(void)
{
	/* mflags = 0: big endian exceptions */
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
}

/*
 * Take exceptions in little endian mode on this partition
 *
 * Note: this call has a partition-wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_little_endian_exceptions(void)
{
	/* mflags = 1: little endian exceptions */
	return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
}
static inline long plapr_set_ciabr(unsigned long ciabr)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
}

static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
}
static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
	if (rc == H_SUCCESS) {
		p->character = retbuf[0];
		p->behaviour = retbuf[1];
	}

	return rc;
}
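/*
 * Usage sketch (illustrative): querying the CPU security characteristics
 * and testing one of the H_CPU_CHAR_* bits defined in asm/hvcall.h.
 */
#if 0
static void example_query_cpu_char(void)
{
	struct h_cpu_char_result result;

	if (plpar_get_cpu_characteristics(&result) == H_SUCCESS &&
	    (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30))
		pr_info("L1D flush via ori 30,30,30 is available\n");
}
#endif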
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */