chsc.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251
  1. /*
  2. * S/390 common I/O routines -- channel subsystem call
  3. *
  4. * Copyright IBM Corp. 1999,2012
  5. * Author(s): Ingo Adlung (adlung@de.ibm.com)
  6. * Cornelia Huck (cornelia.huck@de.ibm.com)
  7. * Arnd Bergmann (arndb@de.ibm.com)
  8. */
  9. #define KMSG_COMPONENT "cio"
  10. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/init.h>
  14. #include <linux/device.h>
  15. #include <linux/pci.h>
  16. #include <asm/cio.h>
  17. #include <asm/chpid.h>
  18. #include <asm/chsc.h>
  19. #include <asm/crw.h>
  20. #include <asm/isc.h>
  21. #include "css.h"
  22. #include "cio.h"
  23. #include "cio_debug.h"
  24. #include "ioasm.h"
  25. #include "chp.h"
  26. #include "chsc.h"
  27. static void *sei_page;
  28. static void *chsc_page;
  29. static DEFINE_SPINLOCK(chsc_page_lock);
  30. /**
  31. * chsc_error_from_response() - convert a chsc response to an error
  32. * @response: chsc response code
  33. *
  34. * Returns an appropriate Linux error code for @response.
  35. */
  36. int chsc_error_from_response(int response)
  37. {
  38. switch (response) {
  39. case 0x0001:
  40. return 0;
  41. case 0x0002:
  42. case 0x0003:
  43. case 0x0006:
  44. case 0x0007:
  45. case 0x0008:
  46. case 0x000a:
  47. case 0x0104:
  48. return -EINVAL;
  49. case 0x0004:
  50. return -EOPNOTSUPP;
  51. case 0x000b:
  52. case 0x0107: /* "Channel busy" for the op 0x003d */
  53. return -EBUSY;
  54. case 0x0100:
  55. case 0x0102:
  56. return -ENOMEM;
  57. default:
  58. return -EIO;
  59. }
  60. }
  61. EXPORT_SYMBOL_GPL(chsc_error_from_response);
/* Request/response block for store subchannel description (CHSC 0x0004). */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;		/* subchannel-set id */
	u16 :4;
	u16 f_sch;		/* first subchannel */
	u16 :16;
	u16 l_sch;		/* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;	/* subchannel description valid */
	u8 dev_valid : 1;	/* device number valid */
	u8 st : 3;		/* subchannel type */
	u8 zeroes : 3;
	u8 unit_addr;		/* unit address */
	u16 devno;		/* device number */
	u8 path_mask;		/* bit i set: chpid[i] is valid */
	u8 fla_valid_mask;	/* bit i set: fla[i] is valid */
	u16 sch;		/* subchannel */
	u8 chpid[8];		/* chpids 0-7 */
	u16 fla[8];		/* full link addresses 0-7 */
} __attribute__ ((packed));
/**
 * chsc_get_ssd_info() - store subchannel description for one subchannel
 * @schid: id of the subchannel to query
 * @ssd: target block, filled with path and link-address data
 *
 * Issues the store-subchannel-description command using the shared
 * chsc_page (under chsc_page_lock) and copies the result into @ssd.
 * Returns 0 on success, -ENODEV/-EBUSY on bad condition codes or an
 * invalid subchannel, or the chsc_error_from_response() translation of
 * the response code.
 */
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	/* Build the request: query a single subchannel (first == last). */
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;
	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	/* Only I/O and message subchannels carry path information. */
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
  138. /**
  139. * chsc_ssqd() - store subchannel QDIO data (SSQD)
  140. * @schid: id of the subchannel on which SSQD is performed
  141. * @ssqd: request and response block for SSQD
  142. *
  143. * Returns 0 on success.
  144. */
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
{
	/* Build the request: query a single subchannel (first == last). */
	memset(ssqd, 0, sizeof(*ssqd));
	ssqd->request.length = 0x0010;
	ssqd->request.code = 0x0024;
	ssqd->first_sch = schid.sch_no;
	ssqd->last_sch = schid.sch_no;
	ssqd->ssid = schid.ssid;

	/* Non-zero condition code: command not accepted. */
	if (chsc(ssqd))
		return -EIO;

	return chsc_error_from_response(ssqd->response.code);
}
EXPORT_SYMBOL_GPL(chsc_ssqd);
  158. /**
  159. * chsc_sadc() - set adapter device controls (SADC)
  160. * @schid: id of the subchannel on which SADC is performed
  161. * @scssc: request and response block for SADC
  162. * @summary_indicator_addr: summary indicator address
  163. * @subchannel_indicator_addr: subchannel indicator address
  164. *
  165. * Returns 0 on success.
  166. */
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
	      u64 summary_indicator_addr, u64 subchannel_indicator_addr)
{
	memset(scssc, 0, sizeof(*scssc));
	scssc->request.length = 0x0fe0;
	scssc->request.code = 0x0021;
	scssc->operation_code = 0;

	scssc->summary_indicator_addr = summary_indicator_addr;
	scssc->subchannel_indicator_addr = subchannel_indicator_addr;
	/* Storage keys for the indicator areas. */
	scssc->ks = PAGE_DEFAULT_KEY >> 4;
	scssc->kc = PAGE_DEFAULT_KEY >> 4;
	scssc->isc = QDIO_AIRQ_ISC;
	scssc->schid = schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc->word_with_d_bit = 0x10000000;

	if (chsc(scssc))
		return -EIO;

	return chsc_error_from_response(scssc->response.code);
}
EXPORT_SYMBOL_GPL(chsc_sadc);
  188. static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
  189. {
  190. spin_lock_irq(sch->lock);
  191. if (sch->driver && sch->driver->chp_event)
  192. if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
  193. goto out_unreg;
  194. spin_unlock_irq(sch->lock);
  195. return 0;
  196. out_unreg:
  197. sch->lpm = 0;
  198. spin_unlock_irq(sch->lock);
  199. css_schedule_eval(sch->schid);
  200. return 0;
  201. }
  202. void chsc_chp_offline(struct chp_id chpid)
  203. {
  204. char dbf_txt[15];
  205. struct chp_link link;
  206. sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
  207. CIO_TRACE_EVENT(2, dbf_txt);
  208. if (chp_get_status(chpid) <= 0)
  209. return;
  210. memset(&link, 0, sizeof(struct chp_link));
  211. link.chpid = chpid;
  212. /* Wait until previous actions have settled. */
  213. css_wait_for_slow_path();
  214. for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
  215. }
/* Forward a CHP_ONLINE event to the subchannel's driver, if it has one. */
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);
	return 0;
}
/* Handle a resource-accessibility event for the path described by @link. */
static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
	css_schedule_reprobe();
}
  246. static int
  247. __get_chpid_from_lir(void *data)
  248. {
  249. struct lir {
  250. u8 iq;
  251. u8 ic;
  252. u16 sci;
  253. /* incident-node descriptor */
  254. u32 indesc[28];
  255. /* attached-node descriptor */
  256. u32 andesc[28];
  257. /* incident-specific information */
  258. u32 isinfo[28];
  259. } __attribute__ ((packed)) *lir;
  260. lir = data;
  261. if (!(lir->iq&0x80))
  262. /* NULL link incident record */
  263. return -EINVAL;
  264. if (!(lir->indesc[0]&0xc0000000))
  265. /* node descriptor not valid */
  266. return -EINVAL;
  267. if (!(lir->indesc[0]&0x10000000))
  268. /* don't handle device-type nodes - FIXME */
  269. return -EINVAL;
  270. /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
  271. return (u16) (lir->indesc[0]&0x000000ff);
  272. }
/* Notification-type 0 (channel subsystem) event area of a SEI response. */
struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;
/* Notification-type 2 (PCI) event area of a SEI response. */
struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;
/* Notification-type mask bits for store event information. */
#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

/* Request/response block for store event information (CHSC 0x000e). */
struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 :24;
	u8 nt;				/* notification type of this event */
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed;
  308. static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
  309. {
  310. struct chp_id chpid;
  311. int id;
  312. CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
  313. sei_area->rs, sei_area->rsid);
  314. if (sei_area->rs != 4)
  315. return;
  316. id = __get_chpid_from_lir(sei_area->ccdf);
  317. if (id < 0)
  318. CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
  319. else {
  320. chp_id_init(&chpid);
  321. chpid.id = id;
  322. chsc_chp_offline(chpid);
  323. }
  324. }
/* Handle an I/O-resource-accessibility event (content code 2). */
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* vf bits 0-1 say how much of the link address (fla) is valid. */
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}
/* Handle a channel-path-availability event (content code 7). */
static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	/* ccdf holds a bitmap with one bit per chpid. */
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;
		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		/* Refresh the cached description under the chp mutex. */
		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);
	}
}
/* Layout of the ccdf for a channel-path-configuration event. */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected chpids */
	u8 op;		/* 0: configure, 1: deconfigure, 2: cancel deconfigure */
	u8 pc;
};
  387. static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
  388. {
  389. struct chp_config_data *data;
  390. struct chp_id chpid;
  391. int num;
  392. char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
  393. CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
  394. if (sei_area->rs != 0)
  395. return;
  396. data = (struct chp_config_data *) &(sei_area->ccdf);
  397. chp_id_init(&chpid);
  398. for (num = 0; num <= __MAX_CHPID; num++) {
  399. if (!chp_test_bit(data->map, num))
  400. continue;
  401. chpid.id = num;
  402. pr_notice("Processing %s for channel path %x.%02x\n",
  403. events[data->op], chpid.cssid, chpid.id);
  404. switch (data->op) {
  405. case 0:
  406. chp_cfg_schedule(chpid, 1);
  407. break;
  408. case 1:
  409. chp_cfg_schedule(chpid, 0);
  410. break;
  411. case 2:
  412. chp_cfg_cancel_deconfigure(chpid);
  413. break;
  414. }
  415. }
  416. }
  417. static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
  418. {
  419. int ret;
  420. CIO_CRW_EVENT(4, "chsc: scm change notification\n");
  421. if (sei_area->rs != 7)
  422. return;
  423. ret = scm_update_information();
  424. if (ret)
  425. CIO_CRW_EVENT(0, "chsc: updating change notification"
  426. " failed (rc=%d).\n", ret);
  427. }
  428. static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
  429. {
  430. int ret;
  431. CIO_CRW_EVENT(4, "chsc: scm available information\n");
  432. if (sei_area->rs != 7)
  433. return;
  434. ret = scm_process_availability_information();
  435. if (ret)
  436. CIO_CRW_EVENT(0, "chsc: process availability information"
  437. " failed (rc=%d).\n", ret);
  438. }
  439. static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
  440. {
  441. switch (sei_area->cc) {
  442. case 1:
  443. zpci_event_error(sei_area->ccdf);
  444. break;
  445. case 2:
  446. zpci_event_availability(sei_area->ccdf);
  447. break;
  448. default:
  449. CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
  450. sei_area->cc);
  451. break;
  452. }
  453. }
/* Dispatch a notification-type-0 event by content code. */
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident*/
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	case 14: /* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}
/*
 * Repeatedly issue store event information and dispatch each returned
 * event until no more are pending.
 * @sei: scratch request/response page
 * @ntsm: notification types to request
 *
 * Firmware that does not support the ntsm rejects the request with
 * response code 3; in that case retry without an ntsm and remember the
 * fallback for all future calls (ntsm_unsupported is static).
 */
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	static int ntsm_unsupported;

	while (true) {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		if (!ntsm_unsupported)
			sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
				      sei->response.code, sei->ntsm);

			if (sei->response.code == 3 && sei->ntsm) {
				/* Fallback for old firmware. */
				ntsm_unsupported = 1;
				continue;
			}
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}

		/* Stop unless flag bit 0x80 requests another iteration. */
		if (!(sei->u.nt0_area.flags & 0x80))
			break;
	}
}
  524. /*
  525. * Handle channel subsystem related CRWs.
  526. * Use store event information to find out what's going on.
  527. *
  528. * Note: Access to sei_page is serialized through machine check handler
  529. * thread, so no need for locking.
  530. */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei = sei_page;

	/* CRW overflow: events may have been lost - re-evaluate everything. */
	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
  545. void chsc_chp_online(struct chp_id chpid)
  546. {
  547. char dbf_txt[15];
  548. struct chp_link link;
  549. sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
  550. CIO_TRACE_EVENT(2, dbf_txt);
  551. if (chp_get_status(chpid) != 0) {
  552. memset(&link, 0, sizeof(struct chp_link));
  553. link.chpid = chpid;
  554. /* Wait until previous actions have settled. */
  555. css_wait_for_slow_path();
  556. for_each_subchannel_staged(__s390_process_res_acc, NULL,
  557. &link);
  558. css_schedule_reprobe();
  559. }
  560. }
  561. static void __s390_subchannel_vary_chpid(struct subchannel *sch,
  562. struct chp_id chpid, int on)
  563. {
  564. unsigned long flags;
  565. struct chp_link link;
  566. memset(&link, 0, sizeof(struct chp_link));
  567. link.chpid = chpid;
  568. spin_lock_irqsave(sch->lock, flags);
  569. if (sch->driver && sch->driver->chp_event)
  570. sch->driver->chp_event(sch, &link,
  571. on ? CHP_VARY_ON : CHP_VARY_OFF);
  572. spin_unlock_irqrestore(sch->lock, flags);
  573. }
  574. static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
  575. {
  576. struct chp_id *chpid = data;
  577. __s390_subchannel_vary_chpid(sch, *chpid, 0);
  578. return 0;
  579. }
  580. static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
  581. {
  582. struct chp_id *chpid = data;
  583. __s390_subchannel_vary_chpid(sch, *chpid, 1);
  584. return 0;
  585. }
  586. /**
  587. * chsc_chp_vary - propagate channel-path vary operation to subchannels
  588. * @chpid: channl-path ID
  589. * @on: non-zero for vary online, zero for vary offline
  590. */
  591. int chsc_chp_vary(struct chp_id chpid, int on)
  592. {
  593. struct channel_path *chp = chpid_to_chp(chpid);
  594. /* Wait until previous actions have settled. */
  595. css_wait_for_slow_path();
  596. /*
  597. * Redo PathVerification on the devices the chpid connects to
  598. */
  599. if (on) {
  600. /* Try to update the channel path description. */
  601. chp_update_desc(chp);
  602. for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
  603. NULL, &chpid);
  604. css_schedule_reprobe();
  605. } else
  606. for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
  607. NULL, &chpid);
  608. return 0;
  609. }
  610. static void
  611. chsc_remove_cmg_attr(struct channel_subsystem *css)
  612. {
  613. int i;
  614. for (i = 0; i <= __MAX_CHPID; i++) {
  615. if (!css->chps[i])
  616. continue;
  617. chp_remove_cmg_attr(css->chps[i]);
  618. }
  619. }
  620. static int
  621. chsc_add_cmg_attr(struct channel_subsystem *css)
  622. {
  623. int i, ret;
  624. ret = 0;
  625. for (i = 0; i <= __MAX_CHPID; i++) {
  626. if (!css->chps[i])
  627. continue;
  628. ret = chp_add_cmg_attr(css->chps[i]);
  629. if (ret)
  630. goto cleanup;
  631. }
  632. return ret;
  633. cleanup:
  634. for (--i; i >= 0; i--) {
  635. if (!css->chps[i])
  636. continue;
  637. chp_remove_cmg_attr(css->chps[i]);
  638. }
  639. return ret;
  640. }
/*
 * Issue set-channel-monitor (CHSC 0x0016) for @css.  @enable non-zero
 * turns measurement on using the blocks at css->cub_addr1/2 (allocated
 * by chsc_secm()); zero turns it off.  Uses chsc_page as scratch area
 * under chsc_page_lock.  Returns 0 or a negative errno.
 */
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;
	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
	/* operation code 0: enable, 1: disable */
	secm_area->operation_code = enable ? 0 : 1;
	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
/*
 * Enable or disable channel measurement for @css, managing the two
 * measurement pages (cub_addr1/2) and the per-chpid measurement
 * attributes.  On any path that leaves measurement disabled, the pages
 * are freed again.
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				/* Attribute setup failed - turn secm off again. */
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}
/*
 * Issue store-channel-path-description (CHSC 0x0002) for one chpid.
 * @chpid: channel path to query
 * @fmt, @rfmt: request/response format selectors (rfmt 1 requires the
 *	fcs facility, rfmt 2 the cib facility)
 * @c, @m: additional command flags, copied into the request block
 * @page: caller-provided scratch/response page (response left in place)
 *
 * Returns 0 on success or a negative errno.
 */
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
/* Fetch the format-0 channel-path description for @chpid into @desc. */
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
/* Fetch the format-1 channel-path description for @chpid into @desc. */
int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
  787. static void
  788. chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
  789. struct cmg_chars *chars)
  790. {
  791. struct cmg_chars *cmg_chars;
  792. int i, mask;
  793. cmg_chars = chp->cmg_chars;
  794. for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
  795. mask = 0x80 >> (i + 3);
  796. if (cmcv & mask)
  797. cmg_chars->values[i] = chars->values[i];
  798. else
  799. cmg_chars->values[i] = 0;
  800. }
  801. }
/*
 * Issue store-channel-path-measurement-characteristics (CHSC 0x0022)
 * for chp->chpid and fill in chp->cmg, chp->shared and (for cmg 2/3)
 * chp->cmg_chars.  Uses chsc_page as scratch area under chsc_page_lock.
 * Returns 0 or a negative errno; on any path that leaves cmg_chars
 * unset, the preallocated buffer is freed again.
 */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	/* Preallocate outside the spinlock; freed below if unused. */
	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		/* No measurement data for this chpid. */
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);
	return ret;
}
/*
 * Allocate the static scratch pages (sei_page, chsc_page) and register
 * the CRW handler for channel-subsystem CRWs.  Returns 0 on success or
 * a negative errno; partial allocations are freed on failure.
 */
int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}
/* Reverse of chsc_init(): unregister the CRW handler and free both pages. */
void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}
  892. int chsc_enable_facility(int operation_code)
  893. {
  894. unsigned long flags;
  895. int ret;
  896. struct {
  897. struct chsc_header request;
  898. u8 reserved1:4;
  899. u8 format:4;
  900. u8 reserved2;
  901. u16 operation_code;
  902. u32 reserved3;
  903. u32 reserved4;
  904. u32 operation_data_area[252];
  905. struct chsc_header response;
  906. u32 reserved5:4;
  907. u32 format2:4;
  908. u32 reserved6:24;
  909. } __attribute__ ((packed)) *sda_area;
  910. spin_lock_irqsave(&chsc_page_lock, flags);
  911. memset(chsc_page, 0, PAGE_SIZE);
  912. sda_area = chsc_page;
  913. sda_area->request.length = 0x0400;
  914. sda_area->request.code = 0x0031;
  915. sda_area->operation_code = operation_code;
  916. ret = chsc(sda_area);
  917. if (ret > 0) {
  918. ret = (ret == 3) ? -ENODEV : -EBUSY;
  919. goto out;
  920. }
  921. switch (sda_area->response.code) {
  922. case 0x0101:
  923. ret = -EOPNOTSUPP;
  924. break;
  925. default:
  926. ret = chsc_error_from_response(sda_area->response.code);
  927. }
  928. if (ret != 0)
  929. CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
  930. operation_code, sda_area->response.code);
  931. out:
  932. spin_unlock_irqrestore(&chsc_page_lock, flags);
  933. return ret;
  934. }
  935. struct css_general_char css_general_characteristics;
  936. struct css_chsc_char css_chsc_characteristics;
  937. int __init
  938. chsc_determine_css_characteristics(void)
  939. {
  940. int result;
  941. struct {
  942. struct chsc_header request;
  943. u32 reserved1;
  944. u32 reserved2;
  945. u32 reserved3;
  946. struct chsc_header response;
  947. u32 reserved4;
  948. u32 general_char[510];
  949. u32 chsc_char[508];
  950. } __attribute__ ((packed)) *scsc_area;
  951. spin_lock_irq(&chsc_page_lock);
  952. memset(chsc_page, 0, PAGE_SIZE);
  953. scsc_area = chsc_page;
  954. scsc_area->request.length = 0x0010;
  955. scsc_area->request.code = 0x0010;
  956. result = chsc(scsc_area);
  957. if (result) {
  958. result = (result == 3) ? -ENODEV : -EBUSY;
  959. goto exit;
  960. }
  961. result = chsc_error_from_response(scsc_area->response.code);
  962. if (result == 0) {
  963. memcpy(&css_general_characteristics, scsc_area->general_char,
  964. sizeof(css_general_characteristics));
  965. memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
  966. sizeof(css_chsc_characteristics));
  967. } else
  968. CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
  969. scsc_area->response.code);
  970. exit:
  971. spin_unlock_irq(&chsc_page_lock);
  972. return result;
  973. }
  974. EXPORT_SYMBOL_GPL(css_general_characteristics);
  975. EXPORT_SYMBOL_GPL(css_chsc_characteristics);
  976. int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
  977. {
  978. struct {
  979. struct chsc_header request;
  980. unsigned int rsvd0;
  981. unsigned int op : 8;
  982. unsigned int rsvd1 : 8;
  983. unsigned int ctrl : 16;
  984. unsigned int rsvd2[5];
  985. struct chsc_header response;
  986. unsigned int rsvd3[7];
  987. } __attribute__ ((packed)) *rr;
  988. int rc;
  989. memset(page, 0, PAGE_SIZE);
  990. rr = page;
  991. rr->request.length = 0x0020;
  992. rr->request.code = 0x0033;
  993. rr->op = op;
  994. rr->ctrl = ctrl;
  995. rc = chsc(rr);
  996. if (rc)
  997. return -EIO;
  998. rc = (rr->response.code == 0x0001) ? 0 : -EIO;
  999. return rc;
  1000. }
  1001. int chsc_sstpi(void *page, void *result, size_t size)
  1002. {
  1003. struct {
  1004. struct chsc_header request;
  1005. unsigned int rsvd0[3];
  1006. struct chsc_header response;
  1007. char data[size];
  1008. } __attribute__ ((packed)) *rr;
  1009. int rc;
  1010. memset(page, 0, PAGE_SIZE);
  1011. rr = page;
  1012. rr->request.length = 0x0010;
  1013. rr->request.code = 0x0038;
  1014. rc = chsc(rr);
  1015. if (rc)
  1016. return -EIO;
  1017. memcpy(result, &rr->data, size);
  1018. return (rr->response.code == 0x0001) ? 0 : -EIO;
  1019. }
  1020. int chsc_siosl(struct subchannel_id schid)
  1021. {
  1022. struct {
  1023. struct chsc_header request;
  1024. u32 word1;
  1025. struct subchannel_id sid;
  1026. u32 word3;
  1027. struct chsc_header response;
  1028. u32 word[11];
  1029. } __attribute__ ((packed)) *siosl_area;
  1030. unsigned long flags;
  1031. int ccode;
  1032. int rc;
  1033. spin_lock_irqsave(&chsc_page_lock, flags);
  1034. memset(chsc_page, 0, PAGE_SIZE);
  1035. siosl_area = chsc_page;
  1036. siosl_area->request.length = 0x0010;
  1037. siosl_area->request.code = 0x0046;
  1038. siosl_area->word1 = 0x80000000;
  1039. siosl_area->sid = schid;
  1040. ccode = chsc(siosl_area);
  1041. if (ccode > 0) {
  1042. if (ccode == 3)
  1043. rc = -ENODEV;
  1044. else
  1045. rc = -EBUSY;
  1046. CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
  1047. schid.ssid, schid.sch_no, ccode);
  1048. goto out;
  1049. }
  1050. rc = chsc_error_from_response(siosl_area->response.code);
  1051. if (rc)
  1052. CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
  1053. schid.ssid, schid.sch_no,
  1054. siosl_area->response.code);
  1055. else
  1056. CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
  1057. schid.ssid, schid.sch_no);
  1058. out:
  1059. spin_unlock_irqrestore(&chsc_page_lock, flags);
  1060. return rc;
  1061. }
  1062. EXPORT_SYMBOL_GPL(chsc_siosl);
  1063. /**
  1064. * chsc_scm_info() - store SCM information (SSI)
  1065. * @scm_area: request and response block for SSI
  1066. * @token: continuation token
  1067. *
  1068. * Returns 0 on success.
  1069. */
  1070. int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
  1071. {
  1072. int ccode, ret;
  1073. memset(scm_area, 0, sizeof(*scm_area));
  1074. scm_area->request.length = 0x0020;
  1075. scm_area->request.code = 0x004C;
  1076. scm_area->reqtok = token;
  1077. ccode = chsc(scm_area);
  1078. if (ccode > 0) {
  1079. ret = (ccode == 3) ? -ENODEV : -EBUSY;
  1080. goto out;
  1081. }
  1082. ret = chsc_error_from_response(scm_area->response.code);
  1083. if (ret != 0)
  1084. CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
  1085. scm_area->response.code);
  1086. out:
  1087. return ret;
  1088. }
  1089. EXPORT_SYMBOL_GPL(chsc_scm_info);
/**
 * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
 * @schid: id of the subchannel on which PNSO is performed
 * @brinfo_area: request and response block for the operation
 * @resume_token: resume token for multiblock response
 * @cnc: Boolean change-notification control
 *
 * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
 *
 * Returns 0 on success.
 */
int chsc_pnso_brinfo(struct subchannel_id schid,
		     struct chsc_pnso_area *brinfo_area,
		     struct chsc_brinfo_resume_token resume_token,
		     int cnc)
{
	memset(brinfo_area, 0, sizeof(*brinfo_area));
	brinfo_area->request.length = 0x0030;
	brinfo_area->request.code = 0x003d; /* network-subchannel operation */
	/* Identify the target subchannel from its id. */
	brinfo_area->m	   = schid.m;
	brinfo_area->ssid  = schid.ssid;
	brinfo_area->sch   = schid.sch_no;
	brinfo_area->cssid = schid.cssid;
	brinfo_area->oc	   = 0; /* Store-network-bridging-information list */
	brinfo_area->resume_token = resume_token;
	brinfo_area->n	   = (cnc != 0); /* normalize cnc to a 0/1 flag bit */
	if (chsc(brinfo_area))
		return -EIO;
	return chsc_error_from_response(brinfo_area->response.code);
}
EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);