sigp.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491
  1. /*
  2. * handling interprocessor communication
  3. *
  4. * Copyright IBM Corp. 2008, 2013
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License (version 2 only)
  8. * as published by the Free Software Foundation.
  9. *
  10. * Author(s): Carsten Otte <cotte@de.ibm.com>
  11. * Christian Borntraeger <borntraeger@de.ibm.com>
  12. * Christian Ehrhardt <ehrhardt@de.ibm.com>
  13. */
  14. #include <linux/kvm.h>
  15. #include <linux/kvm_host.h>
  16. #include <linux/slab.h>
  17. #include <asm/sigp.h>
  18. #include "gaccess.h"
  19. #include "kvm-s390.h"
  20. #include "trace.h"
  21. static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
  22. u64 *reg)
  23. {
  24. struct kvm_s390_local_interrupt *li;
  25. int cpuflags;
  26. int rc;
  27. int ext_call_pending;
  28. li = &dst_vcpu->arch.local_int;
  29. cpuflags = atomic_read(li->cpuflags);
  30. ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
  31. if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
  32. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  33. else {
  34. *reg &= 0xffffffff00000000UL;
  35. if (ext_call_pending)
  36. *reg |= SIGP_STATUS_EXT_CALL_PENDING;
  37. if (cpuflags & CPUSTAT_STOPPED)
  38. *reg |= SIGP_STATUS_STOPPED;
  39. rc = SIGP_CC_STATUS_STORED;
  40. }
  41. VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
  42. rc);
  43. return rc;
  44. }
  45. static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
  46. struct kvm_vcpu *dst_vcpu)
  47. {
  48. struct kvm_s390_irq irq = {
  49. .type = KVM_S390_INT_EMERGENCY,
  50. .u.emerg.code = vcpu->vcpu_id,
  51. };
  52. int rc = 0;
  53. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  54. if (!rc)
  55. VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
  56. dst_vcpu->vcpu_id);
  57. return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
  58. }
  59. static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
  60. {
  61. return __inject_sigp_emergency(vcpu, dst_vcpu);
  62. }
/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: inject the emergency signal only if
 * the destination does not appear to be doing useful work.
 *
 * The signal is injected unless the CPU is stopped with I/O and external
 * interrupts disabled, is in an enabled wait with a zero continuation
 * address, or -- when not idle -- is executing in the address space named
 * by @asn.  If none of the injection conditions hold,
 * SIGP_STATUS_INCORRECT_STATE is stored into *reg and
 * SIGP_CC_STATUS_STORED is returned.
 *
 * NOTE(review): the stopped/idle checks are done on the *source* vcpu while
 * the PSW and control-register ASNs are read from dst_vcpu -- this mix looks
 * inconsistent; confirm against the architecture before changing anything.
 */
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	bool idle;

	idle = is_vcpu_idle(vcpu);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!is_vcpu_stopped(vcpu)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || (idle && psw->addr != 0)
	    || (!idle && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}
  87. static int __sigp_external_call(struct kvm_vcpu *vcpu,
  88. struct kvm_vcpu *dst_vcpu, u64 *reg)
  89. {
  90. struct kvm_s390_irq irq = {
  91. .type = KVM_S390_INT_EXTERNAL_CALL,
  92. .u.extcall.code = vcpu->vcpu_id,
  93. };
  94. int rc;
  95. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  96. if (rc == -EBUSY) {
  97. *reg &= 0xffffffff00000000UL;
  98. *reg |= SIGP_STATUS_EXT_CALL_PENDING;
  99. return SIGP_CC_STATUS_STORED;
  100. } else if (rc == 0) {
  101. VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
  102. dst_vcpu->vcpu_id);
  103. }
  104. return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
  105. }
  106. static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
  107. {
  108. struct kvm_s390_irq irq = {
  109. .type = KVM_S390_SIGP_STOP,
  110. };
  111. int rc;
  112. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  113. if (rc == -EBUSY)
  114. rc = SIGP_CC_BUSY;
  115. else if (rc == 0)
  116. VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
  117. dst_vcpu->vcpu_id);
  118. return rc;
  119. }
  120. static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
  121. struct kvm_vcpu *dst_vcpu, u64 *reg)
  122. {
  123. struct kvm_s390_irq irq = {
  124. .type = KVM_S390_SIGP_STOP,
  125. .u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
  126. };
  127. int rc;
  128. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  129. if (rc == -EBUSY)
  130. rc = SIGP_CC_BUSY;
  131. else if (rc == 0)
  132. VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
  133. dst_vcpu->vcpu_id);
  134. return rc;
  135. }
  136. static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter,
  137. u64 *status_reg)
  138. {
  139. unsigned int i;
  140. struct kvm_vcpu *v;
  141. bool all_stopped = true;
  142. kvm_for_each_vcpu(i, v, vcpu->kvm) {
  143. if (v == vcpu)
  144. continue;
  145. if (!is_vcpu_stopped(v))
  146. all_stopped = false;
  147. }
  148. *status_reg &= 0xffffffff00000000UL;
  149. /* Reject set arch order, with czam we're always in z/Arch mode. */
  150. *status_reg |= (all_stopped ? SIGP_STATUS_INVALID_PARAMETER :
  151. SIGP_STATUS_INCORRECT_STATE);
  152. return SIGP_CC_STATUS_STORED;
  153. }
  154. static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
  155. u32 address, u64 *reg)
  156. {
  157. struct kvm_s390_irq irq = {
  158. .type = KVM_S390_SIGP_SET_PREFIX,
  159. .u.prefix.address = address & 0x7fffe000u,
  160. };
  161. int rc;
  162. /*
  163. * Make sure the new value is valid memory. We only need to check the
  164. * first page, since address is 8k aligned and memory pieces are always
  165. * at least 1MB aligned and have at least a size of 1MB.
  166. */
  167. if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
  168. *reg &= 0xffffffff00000000UL;
  169. *reg |= SIGP_STATUS_INVALID_PARAMETER;
  170. return SIGP_CC_STATUS_STORED;
  171. }
  172. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  173. if (rc == -EBUSY) {
  174. *reg &= 0xffffffff00000000UL;
  175. *reg |= SIGP_STATUS_INCORRECT_STATE;
  176. return SIGP_CC_STATUS_STORED;
  177. }
  178. return rc;
  179. }
  180. static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
  181. struct kvm_vcpu *dst_vcpu,
  182. u32 addr, u64 *reg)
  183. {
  184. int flags;
  185. int rc;
  186. flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
  187. if (!(flags & CPUSTAT_STOPPED)) {
  188. *reg &= 0xffffffff00000000UL;
  189. *reg |= SIGP_STATUS_INCORRECT_STATE;
  190. return SIGP_CC_STATUS_STORED;
  191. }
  192. addr &= 0x7ffffe00;
  193. rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
  194. if (rc == -EFAULT) {
  195. *reg &= 0xffffffff00000000UL;
  196. *reg |= SIGP_STATUS_INVALID_PARAMETER;
  197. rc = SIGP_CC_STATUS_STORED;
  198. }
  199. return rc;
  200. }
  201. static int __sigp_sense_running(struct kvm_vcpu *vcpu,
  202. struct kvm_vcpu *dst_vcpu, u64 *reg)
  203. {
  204. struct kvm_s390_local_interrupt *li;
  205. int rc;
  206. if (!test_kvm_facility(vcpu->kvm, 9)) {
  207. *reg &= 0xffffffff00000000UL;
  208. *reg |= SIGP_STATUS_INVALID_ORDER;
  209. return SIGP_CC_STATUS_STORED;
  210. }
  211. li = &dst_vcpu->arch.local_int;
  212. if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
  213. /* running */
  214. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  215. } else {
  216. /* not running */
  217. *reg &= 0xffffffff00000000UL;
  218. *reg |= SIGP_STATUS_NOT_RUNNING;
  219. rc = SIGP_CC_STATUS_STORED;
  220. }
  221. VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
  222. dst_vcpu->vcpu_id, rc);
  223. return rc;
  224. }
/*
 * SIGP (RE)START: the order itself is completed in user space; here we only
 * decide whether user space may proceed.
 *
 * Returns SIGP_CC_BUSY when a STOP interrupt is still pending for the
 * destination, otherwise -EOPNOTSUPP to hand the order to user space.
 */
static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	/* make sure we don't race with STOP irq injection */
	spin_lock(&li->lock);
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}
  238. static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
  239. struct kvm_vcpu *dst_vcpu, u8 order_code)
  240. {
  241. /* handle (INITIAL) CPU RESET in user space */
  242. return -EOPNOTSUPP;
  243. }
  244. static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
  245. struct kvm_vcpu *dst_vcpu)
  246. {
  247. /* handle unknown orders in user space */
  248. return -EOPNOTSUPP;
  249. }
/*
 * Dispatch a SIGP order that targets a specific CPU.
 *
 * Looks up the destination vcpu by @cpu_addr, bumps the per-order
 * statistics counter and calls the order handler.  Returns
 * SIGP_CC_NOT_OPERATIONAL when no vcpu with that id exists, a SIGP
 * condition code (>= 0) on completion, or a negative error code
 * (-EOPNOTSUPP means the order must be finished in user space).
 */
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);

	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}
/*
 * Decide whether a SIGP order must be forwarded to user space.
 *
 * With the user_sigp capability enabled, every order except the purely
 * informational ones (sense, external call, emergency variants, sense
 * running) is handled by user space.  Returns 1 when the order goes to
 * user space (after updating the statistics counter here, since the
 * in-kernel handlers will not run), 0 when the kernel handles it.
 */
static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
					   u16 cpu_addr)
{
	if (!vcpu->kvm->arch.user_sigp)
		return 0;

	switch (order_code) {
	case SIGP_SENSE:
	case SIGP_EXTERNAL_CALL:
	case SIGP_EMERGENCY_SIGNAL:
	case SIGP_COND_EMERGENCY_SIGNAL:
	case SIGP_SENSE_RUNNING:
		return 0;
	/* update counters as we're directly dropping to user space */
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		break;
	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
	}
	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
		   order_code, cpu_addr);

	return 1;
}
/*
 * Intercept handler for the SIGNAL PROCESSOR instruction.
 *
 * Decodes the register fields from the instruction (r1/r3 from the ipa;
 * the destination CPU address from gpr r3, the parameter from the odd
 * register of the r1 pair), dispatches the order and sets the resulting
 * condition code in the guest PSW.  Returns 0 on completion, a negative
 * error code (e.g. -EOPNOTSUPP to exit to user space) otherwise.
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
		return -EOPNOTSUPP;

	/* the parameter lives in the odd register of the r1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter,
				     &vcpu->run->s.regs.gprs[r1]);
		break;
	default:
		/* all other orders target a specific CPU */
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
  404. /*
  405. * Handle SIGP partial execution interception.
  406. *
  407. * This interception will occur at the source cpu when a source cpu sends an
  408. * external call to a target cpu and the target cpu has the WAIT bit set in
  409. its cpuflags. Interception will occur after the interrupt indicator bits at
  410. * the target cpu have been set. All error cases will lead to instruction
  411. * interception, therefore nothing is to be checked or prepared.
  412. */
  413. int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
  414. {
  415. int r3 = vcpu->arch.sie_block->ipa & 0x000f;
  416. u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
  417. struct kvm_vcpu *dest_vcpu;
  418. u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
  419. trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
  420. if (order_code == SIGP_EXTERNAL_CALL) {
  421. dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
  422. BUG_ON(dest_vcpu == NULL);
  423. kvm_s390_vcpu_wakeup(dest_vcpu);
  424. kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
  425. return 0;
  426. }
  427. return -EOPNOTSUPP;
  428. }