/*
 *  linux/drivers/scsi/esas2r/esas2r_disc.c
 *      esas2r device discovery routines
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  NO WARRANTY
 *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  Each Recipient is
 *  solely responsible for determining the appropriateness of using and
 *  distributing the Program and assumes all risks associated with its
 *  exercise of rights under this Agreement, including but not limited to
 *  the risks and costs of program errors, damage to or loss of data,
 *  programs or equipment, and unavailability or interruption of operations.
 *
 *  DISCLAIMER OF LIABILITY
 *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "esas2r.h"

/* Miscellaneous internal discovery routines */
static void esas2r_disc_abort(struct esas2r_adapter *a,
			      struct esas2r_request *rq);
static bool esas2r_disc_continue(struct esas2r_adapter *a,
				 struct esas2r_request *rq);
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
				      struct esas2r_request *rq);

/* Internal discovery routines that process the states */
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
				       struct esas2r_request *rq);
static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
					  struct esas2r_request *rq);
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
				struct esas2r_request *rq);
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
				   struct esas2r_request *rq);
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
				  struct esas2r_request *rq);
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
				     struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
					  struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
					  struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq);
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
				      struct esas2r_request *rq);
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
					 struct esas2r_request *rq);

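/*
 * Reset the discovery state machine for a new discovery pass.  The device
 * wait time and count come from NVRAM (the count clamped to
 * ESAS2R_MAX_TARGETS); after a chip reset or power management event we
 * instead wait for exactly the number of devices found previously.
 */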
void esas2r_disc_initialize(struct esas2r_adapter *a)
{
	struct esas2r_sas_nvram *nvr = a->nvram;

	esas2r_trace_enter();

	clear_bit(AF_DISC_IN_PROG, &a->flags);
	clear_bit(AF2_DEV_SCAN, &a->flags2);
	clear_bit(AF2_DEV_CNT_OK, &a->flags2);

	a->disc_start_time = jiffies_to_msecs(jiffies);
	a->disc_wait_time = nvr->dev_wait_time * 1000;
	a->disc_wait_cnt = nvr->dev_wait_count;

	if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
		a->disc_wait_cnt = ESAS2R_MAX_TARGETS;

	/*
	 * If we are doing chip reset or power management processing, always
	 * wait for devices.  Use the NVRAM device count if it is greater
	 * than the previously discovered device count.
	 */

	esas2r_hdebug("starting discovery...");

	a->general_req.interrupt_cx = NULL;

	if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
	    test_bit(AF_POWER_MGT, &a->flags)) {
		if (a->prev_dev_cnt == 0) {
			/* Don't bother waiting if there is nothing to wait
			 * for.
			 */
			a->disc_wait_time = 0;
		} else {
			/*
			 * Set the device wait count to what was previously
			 * found.  We don't care if the user only configured
			 * a time because we know the exact count to wait for.
			 * There is no need to honor the user's wishes to
			 * always wait the full time.
			 */
			a->disc_wait_cnt = a->prev_dev_cnt;

			/*
			 * Bump the minimum wait time to 15 seconds since the
			 * default is 3 (system boot or the boot driver usually
			 * buys us more time).
			 */
			if (a->disc_wait_time < 15000)
				a->disc_wait_time = 15000;
		}
	}

	esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
	esas2r_trace("disc wait time: %d", a->disc_wait_time);

	if (a->disc_wait_time == 0)
		esas2r_disc_check_complete(a);

	esas2r_trace_exit();
}

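/*
 * Start any discovery event that was queued while it could not be acted on
 * immediately (e.g. because it was raised from the ISR).  Runs under the
 * memory lock, which serializes access to the discovery context.
 */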
void esas2r_disc_start_waiting(struct esas2r_adapter *a)
{
	unsigned long flags;

	spin_lock_irqsave(&a->mem_lock, flags);

	if (a->disc_ctx.disc_evt)
		esas2r_disc_start_port(a);

	spin_unlock_irqrestore(&a->mem_lock, flags);
}

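/*
 * Main poll-mode entry point: service pending interrupts, start any queued
 * discovery event, then wait on or advance the active discovery request.
 * A request that times out aborts discovery and resets the adapter.
 */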
void esas2r_disc_check_for_work(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;

	/* service any pending interrupts first */

	esas2r_polled_interrupt(a);

	/*
	 * Now, interrupt processing may have queued up a discovery event.  Go
	 * see if we have one to start.  We couldn't start it in the ISR since
	 * polled discovery would cause a deadlock.
	 */
	esas2r_disc_start_waiting(a);

	if (rq->interrupt_cx == NULL)
		return;

	if (rq->req_stat == RS_STARTED
	    && rq->timeout <= RQ_MAX_TIMEOUT) {
		/* wait for the current discovery request to complete. */
		esas2r_wait_request(a, rq);

		if (rq->req_stat == RS_TIMEOUT) {
			esas2r_disc_abort(a, rq);
			esas2r_local_reset_adapter(a);
			return;
		}
	}

	if (rq->req_stat == RS_PENDING
	    || rq->req_stat == RS_STARTED)
		return;

	esas2r_disc_continue(a, rq);
}

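/*
 * Evaluate the device wait.  We keep waiting (scheduling a scan after the
 * first three seconds) until the wait time expires or the expected device
 * count is reached; once the count is met, the wait time is extended
 * briefly to consume trailing events before waiting stops for good.
 */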
void esas2r_disc_check_complete(struct esas2r_adapter *a)
{
	unsigned long flags;

	esas2r_trace_enter();

	/* check to see if we should be waiting for devices */
	if (a->disc_wait_time) {
		u32 currtime = jiffies_to_msecs(jiffies);
		u32 time = currtime - a->disc_start_time;

		/*
		 * Wait until the device wait time is exhausted or the device
		 * wait count is satisfied.
		 */
		if (time < a->disc_wait_time
		    && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
			|| a->disc_wait_cnt == 0)) {
			/* After three seconds of waiting, schedule a scan. */
			if (time >= 3000
			    && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
				spin_lock_irqsave(&a->mem_lock, flags);
				esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
				spin_unlock_irqrestore(&a->mem_lock, flags);
			}

			esas2r_trace_exit();
			return;
		}

		/*
		 * We are done waiting...we think.  Adjust the wait time to
		 * consume events after the count is met.
		 */
		if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
			a->disc_wait_time = time + 3000;

		/* If we haven't done a full scan yet, do it now. */
		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
			spin_lock_irqsave(&a->mem_lock, flags);
			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
			spin_unlock_irqrestore(&a->mem_lock, flags);

			esas2r_trace_exit();
			return;
		}

		/*
		 * Now, if there is still time left to consume events, continue
		 * waiting.
		 */
		if (time < a->disc_wait_time) {
			esas2r_trace_exit();
			return;
		}
	} else {
		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
			spin_lock_irqsave(&a->mem_lock, flags);
			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
			spin_unlock_irqrestore(&a->mem_lock, flags);
		}
	}

	/* We want to stop waiting for devices. */
	a->disc_wait_time = 0;

	if (test_bit(AF_DISC_POLLED, &a->flags) &&
	    test_bit(AF_DISC_IN_PROG, &a->flags)) {
		/*
		 * Polled discovery is still pending so continue the active
		 * discovery until it is done.  At that point, we will stop
		 * polled discovery and transition to interrupt driven
		 * discovery.
		 */
	} else {
		/*
		 * Done waiting for devices.  Note that we get here immediately
		 * after deferred waiting completes because that is interrupt
		 * driven; i.e. there is no transition.
		 */
		esas2r_disc_fix_curr_requests(a);
		clear_bit(AF_DISC_PENDING, &a->flags);

		/*
		 * We have deferred target state changes until now because we
		 * don't want to report any removals (due to the first arrival)
		 * until the device wait time expires.
		 */
		set_bit(AF_PORT_CHANGE, &a->flags);
	}

	esas2r_trace_exit();
}

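/*
 * Record a discovery event in the discovery context.  The event is acted on
 * immediately only if no chip reset is pending and discovery is not polled;
 * otherwise it stays queued until esas2r_disc_start_waiting() picks it up.
 */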
void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
{
	struct esas2r_disc_context *dc = &a->disc_ctx;

	esas2r_trace_enter();

	esas2r_trace("disc_event: %d", disc_evt);

	/* Initialize the discovery context */
	dc->disc_evt |= disc_evt;

	/*
	 * Don't start discovery before or during polled discovery.  If we
	 * did, we would have a deadlock if we are in the ISR already.
	 */
	if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
	    !test_bit(AF_DISC_POLLED, &a->flags))
		esas2r_disc_start_port(a);

	esas2r_trace_exit();
}

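/*
 * Begin servicing queued discovery events on the general request.  Returns
 * false if discovery is already in progress, nothing is queued, or a
 * polled-to-interrupt-driven transition is needed; otherwise the event is
 * decoded into a starting state and, when interrupt driven, the state
 * machine is run.
 */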
bool esas2r_disc_start_port(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	struct esas2r_disc_context *dc = &a->disc_ctx;
	bool ret;

	esas2r_trace_enter();

	if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
		esas2r_trace_exit();
		return false;
	}

	/* If there is a discovery waiting, process it. */
	if (dc->disc_evt) {
		if (test_bit(AF_DISC_POLLED, &a->flags)
		    && a->disc_wait_time == 0) {
			/*
			 * We are doing polled discovery, but we no longer want
			 * to wait for devices.  Stop polled discovery and
			 * transition to interrupt driven discovery.
			 */
			esas2r_trace_exit();
			return false;
		}
	} else {
		/* Discovery is complete. */

		esas2r_hdebug("disc done");

		set_bit(AF_PORT_CHANGE, &a->flags);

		esas2r_trace_exit();
		return false;
	}

	/* Handle the discovery context */
	esas2r_trace("disc_evt: %d", dc->disc_evt);
	set_bit(AF_DISC_IN_PROG, &a->flags);
	dc->flags = 0;

	if (test_bit(AF_DISC_POLLED, &a->flags))
		dc->flags |= DCF_POLLED;

	rq->interrupt_cx = dc;
	rq->req_stat = RS_SUCCESS;

	/* Decode the event code */
	if (dc->disc_evt & DCDE_DEV_SCAN) {
		dc->disc_evt &= ~DCDE_DEV_SCAN;

		dc->flags |= DCF_DEV_SCAN;
		dc->state = DCS_BLOCK_DEV_SCAN;
	} else if (dc->disc_evt & DCDE_DEV_CHANGE) {
		dc->disc_evt &= ~DCDE_DEV_CHANGE;

		dc->flags |= DCF_DEV_CHANGE;
		dc->state = DCS_DEV_RMV;
	}

	/* Continue interrupt driven discovery */
	if (!test_bit(AF_DISC_POLLED, &a->flags))
		ret = esas2r_disc_continue(a, rq);
	else
		ret = true;

	esas2r_trace_exit();

	return ret;
}

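/*
 * The discovery state machine.  Each state handler returns true if it
 * issued a firmware request, in which case we unwind and resume from that
 * request's completion callback; a false return means the state completed
 * synchronously and the next state can run at once.  When both the scan and
 * change flags are clear, discovery is finished and the next queued event,
 * if any, is started.
 */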
static bool esas2r_disc_continue(struct esas2r_adapter *a,
				 struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;

	/* Device discovery/removal */
	while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
		rslt = false;

		switch (dc->state) {
		case DCS_DEV_RMV:

			rslt = esas2r_disc_dev_remove(a, rq);
			break;

		case DCS_DEV_ADD:

			rslt = esas2r_disc_dev_add(a, rq);
			break;

		case DCS_BLOCK_DEV_SCAN:

			rslt = esas2r_disc_block_dev_scan(a, rq);
			break;

		case DCS_RAID_GRP_INFO:

			rslt = esas2r_disc_raid_grp_info(a, rq);
			break;

		case DCS_PART_INFO:

			rslt = esas2r_disc_part_info(a, rq);
			break;

		case DCS_PT_DEV_INFO:

			rslt = esas2r_disc_passthru_dev_info(a, rq);
			break;

		case DCS_PT_DEV_ADDR:

			rslt = esas2r_disc_passthru_dev_addr(a, rq);
			break;

		case DCS_DISC_DONE:

			dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
			break;

		default:

			esas2r_bugon();
			dc->state = DCS_DISC_DONE;
			break;
		}

		if (rslt)
			return true;
	}

	/* Discovery is done...for now. */
	rq->interrupt_cx = NULL;

	if (!test_bit(AF_DISC_PENDING, &a->flags))
		esas2r_disc_fix_curr_requests(a);

	clear_bit(AF_DISC_IN_PROG, &a->flags);

	/* Start the next discovery. */
	return esas2r_disc_start_port(a);
}

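/*
 * Submit a discovery request to the firmware, deferring it if a chip reset
 * is pending or the firmware is being flashed; deferred requests are later
 * restarted through esas2r_disc_local_start_request().
 */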
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
				      struct esas2r_request *rq)
{
	unsigned long flags;

	/* Set the timeout to a minimum value. */
	if (rq->timeout < ESAS2R_DEFAULT_TMO)
		rq->timeout = ESAS2R_DEFAULT_TMO;

	/*
	 * Override the request type to distinguish discovery requests.  If we
	 * end up deferring the request, esas2r_disc_local_start_request()
	 * will be called to restart it.
	 */
	rq->req_type = RT_DISC_REQ;

	spin_lock_irqsave(&a->queue_lock, flags);

	if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
	    !test_bit(AF_FLASHING, &a->flags))
		esas2r_disc_local_start_request(a, rq);
	else
		list_add_tail(&rq->req_list, &a->defer_list);

	spin_unlock_irqrestore(&a->queue_lock, flags);

	return true;
}

void esas2r_disc_local_start_request(struct esas2r_adapter *a,
				     struct esas2r_request *rq)
{
	esas2r_trace_enter();

	list_add_tail(&rq->req_list, &a->active_list);

	esas2r_start_vda_request(a, rq);

	esas2r_trace_exit();
}

static void esas2r_disc_abort(struct esas2r_adapter *a,
			      struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;

	esas2r_trace_enter();

	/* abort the current discovery */

	dc->state = DCS_DISC_DONE;

	esas2r_trace_exit();
}

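/*
 * DCS_BLOCK_DEV_SCAN: issue a VDAMGT_DEV_SCAN management request to get the
 * firmware's current scan generation before RAID groups and pass through
 * devices are enumerated.
 */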
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
				       struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;

	esas2r_trace_enter();

	esas2r_rq_init_request(rq, a);

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_DEV_SCAN,
			     0,
			     0,
			     0,
			     NULL);

	rq->comp_cb = esas2r_disc_block_dev_scan_cb;

	rq->timeout = 30000;
	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SUCCESS)
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;

	dc->state = DCS_RAID_GRP_INFO;
	dc->raid_grp_ix = 0;

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */
	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

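/*
 * DCS_RAID_GRP_INFO: query one RAID group per pass, indexed by raid_grp_ix.
 * Online and degraded groups move on to partition enumeration; other
 * statuses skip to the next group, and an invalid group index advances the
 * state machine to pass through device discovery.
 */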
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
				      struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vda_grp_info *grpinfo;

	esas2r_trace_enter();

	esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);

	if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
		dc->state = DCS_DISC_DONE;

		esas2r_trace_exit();

		return false;
	}

	esas2r_rq_init_request(rq, a);

	grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

	memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_GRP_INFO,
			     dc->scan_gen,
			     0,
			     sizeof(struct atto_vda_grp_info),
			     NULL);

	grpinfo->grp_index = dc->raid_grp_ix;

	rq->comp_cb = esas2r_disc_raid_grp_info_cb;

	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
					 struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_grp_info *grpinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		goto done;
	}

	if (rq->req_stat == RS_SUCCESS) {
		grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

		if (grpinfo->status != VDA_GRP_STAT_ONLINE
		    && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
			/* go to the next group. */

			dc->raid_grp_ix++;
		} else {
			memcpy(&dc->raid_grp_name[0],
			       &grpinfo->grp_name[0],
			       sizeof(grpinfo->grp_name));

			dc->interleave = le32_to_cpu(grpinfo->interleave);
			dc->block_size = le32_to_cpu(grpinfo->block_size);

			dc->state = DCS_PART_INFO;
			dc->part_num = 0;
		}
	} else {
		if (rq->req_stat != RS_GRP_INVALID) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group info failed - "
				   "returned with %x",
				   rq->req_stat);
		}

		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	}

done:

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */
	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

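/*
 * DCS_PART_INFO: enumerate the partitions of the current RAID group by
 * name; each successful response adds a RAID target to the target database.
 * RS_PART_LAST (or any other failure) returns to group enumeration.
 */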
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vdapart_info *partinfo;

	esas2r_trace_enter();

	esas2r_trace("part_num: %d", dc->part_num);

	if (dc->part_num >= VDA_MAX_PARTITIONS) {
		dc->state = DCS_RAID_GRP_INFO;
		dc->raid_grp_ix++;

		esas2r_trace_exit();

		return false;
	}

	esas2r_rq_init_request(rq, a);

	partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

	memset(partinfo, 0, sizeof(struct atto_vdapart_info));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_PART_INFO,
			     dc->scan_gen,
			     0,
			     sizeof(struct atto_vdapart_info),
			     NULL);

	partinfo->part_no = dc->part_num;

	memcpy(&partinfo->grp_name[0],
	       &dc->raid_grp_name[0],
	       sizeof(partinfo->grp_name));

	rq->comp_cb = esas2r_disc_part_info_cb;

	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
				     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vdapart_info *partinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		dc->state = DCS_RAID_GRP_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

		dc->part_num = partinfo->part_no;

		dc->curr_virt_id = le16_to_cpu(partinfo->target_id);

		esas2r_targ_db_add_raid(a, dc);

		dc->part_num++;
	} else {
		if (rq->req_stat != RS_PART_LAST) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group partition info "
				   "failed - status:%d", rq->req_stat);
		}

		dc->state = DCS_RAID_GRP_INFO;
		dc->raid_grp_ix++;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */
	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

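/*
 * DCS_PT_DEV_INFO: query the next pass through device at or beyond dev_ix.
 * A device reporting a physical target ID proceeds to the address states;
 * others are skipped, and an invalid device index ends discovery.
 */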
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	esas2r_trace("dev_ix: %d", dc->dev_ix);

	esas2r_rq_init_request(rq, a);

	devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

	memset(devinfo, 0, sizeof(struct atto_vda_devinfo));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_DEV_PT_INFO,
			     dc->scan_gen,
			     dc->dev_ix,
			     sizeof(struct atto_vda_devinfo),
			     NULL);

	rq->comp_cb = esas2r_disc_passthru_dev_info_cb;

	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

		dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);

		dc->curr_virt_id = le16_to_cpu(devinfo->target_id);

		if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
			dc->curr_phys_id =
				le16_to_cpu(devinfo->phys_target_id);
			dc->dev_addr_type = ATTO_GDA_AT_PORT;
			dc->state = DCS_PT_DEV_ADDR;

			esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
			esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		} else {
			dc->dev_ix++;
		}
	} else {
		if (rq->req_stat != RS_DEV_INVALID) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for device information failed - "
				   "status:%d", rq->req_stat);
		}

		dc->state = DCS_DISC_DONE;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */
	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

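/*
 * DCS_PT_DEV_ADDR: tunnel an ATTO_FUNC_GET_DEV_ADDR IOCTL through the
 * firmware to fetch an address for the current pass through device.  The
 * port (SAS) address is fetched first; the callback then repeats the
 * request for the unique identifier.
 */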
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_ioctl *hi;
	struct esas2r_sg_context sgc;

	esas2r_trace_enter();

	esas2r_rq_init_request(rq, a);

	/* format the request. */

	sgc.cur_offset = NULL;
	sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
	sgc.length = offsetof(struct atto_ioctl, data)
		     + sizeof(struct atto_hba_get_device_address);

	esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, &sgc)) {
		esas2r_rq_destroy_request(rq, a);

		esas2r_trace_exit();

		return false;
	}

	rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;

	rq->interrupt_cx = dc;

	/* format the IOCTL data. */

	hi = (struct atto_ioctl *)a->disc_buffer;

	memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);

	hi->version = ATTO_VER_GET_DEV_ADDR0;
	hi->function = ATTO_FUNC_GET_DEV_ADDR;
	hi->flags = HBAF_TUNNEL;

	hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id);
	hi->data.get_dev_addr.addr_type = dc->dev_addr_type;

	/* start it up. */

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = NULL;
	unsigned long flags;
	struct atto_ioctl *hi;
	u16 addrlen;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	hi = (struct atto_ioctl *)a->disc_buffer;

	if (rq->req_stat == RS_SUCCESS
	    && hi->status == ATTO_STS_SUCCESS) {
		addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);

		if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
			if (addrlen == sizeof(u64))
				memcpy(&dc->sas_addr,
				       &hi->data.get_dev_addr.address[0],
				       addrlen);
			else
				memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));

			/* Get the unique identifier. */
			dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;

			goto next_dev_addr;
		} else {
			/* Add the pass through target. */
			if (HIBYTE(addrlen) == 0) {
				t = esas2r_targ_db_add_pthru(
					a,
					dc,
					&hi->data.get_dev_addr.address[0],
					(u8)hi->data.get_dev_addr.addr_len);

				if (t)
					memcpy(&t->sas_addr, &dc->sas_addr,
					       sizeof(t->sas_addr));
			} else {
				/* getting the back end data failed */

				esas2r_log(ESAS2R_LOG_WARN,
					   "an error occurred retrieving the "
					   "back end data (%s:%d)",
					   __func__,
					   __LINE__);
			}
		}
	} else {
		/* getting the back end data failed */

		esas2r_log(ESAS2R_LOG_WARN,
			   "an error occurred retrieving the back end data - "
			   "rq->req_stat:%d hi->status:%d",
			   rq->req_stat, hi->status);
	}

	/* proceed to the next device. */

	if (dc->flags & DCF_DEV_SCAN) {
		dc->dev_ix++;
		dc->state = DCS_PT_DEV_INFO;
	} else if (dc->flags & DCF_DEV_CHANGE) {
		dc->curr_targ++;
		dc->state = DCS_DEV_ADD;
	} else {
		esas2r_bugon();
	}

next_dev_addr:
	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */
	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

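/*
 * Scatter/gather address callback for the discovery IOCTL: all data lands
 * in the adapter's single disc_buffer, so return its physical address
 * within the uncached region along with the full transfer length.
 */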
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = sgc->adapter;

	if (sgc->length > ESAS2R_DISC_BUF_LEN)
		esas2r_bugon();

	*addr = a->uncached_phys
		+ (u64)((u8 *)a->disc_buffer - a->uncached);

	return sgc->length;
}

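/*
 * DCS_DEV_RMV: sweep the target database and remove every target whose new
 * state is TS_NOT_PRESENT, then fall through to arrival processing.
 */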
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t;
	struct esas2r_target *t2;

	esas2r_trace_enter();

	/* process removals. */
	for (t = a->targetdb; t < a->targetdb_end; t++) {
		if (t->new_target_state != TS_NOT_PRESENT)
			continue;

		t->new_target_state = TS_INVALID;

		/* remove the right target! */
		t2 = esas2r_targ_db_find_by_virt_id(a,
						    esas2r_targ_get_id(t, a));

		if (t2)
			esas2r_targ_db_remove(a, t2);
	}

	/* removals complete.  process arrivals. */
	dc->state = DCS_DEV_ADD;
	dc->curr_targ = a->targetdb;

	esas2r_trace_exit();

	return false;
}

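/*
 * DCS_DEV_ADD: walk the target database one entry per pass and add any
 * target marked TS_PRESENT.  RAID volumes are added directly; pass through
 * devices first go through the device address states to obtain their SAS
 * address and unique identifier.
 */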
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = dc->curr_targ;

	if (t >= a->targetdb_end) {
		/* done processing state changes. */
		dc->state = DCS_DISC_DONE;
	} else if (t->new_target_state == TS_PRESENT) {
		struct atto_vda_ae_lu *luevt = &t->lu_event;

		esas2r_trace_enter();

		/* clear this now in case more events come in. */

		t->new_target_state = TS_INVALID;

		/* setup the discovery context for adding this device. */

		dc->curr_virt_id = esas2r_targ_get_id(t, a);

		if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
		     + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
		    && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
			dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
			dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
		} else {
			dc->block_size = 0;
			dc->interleave = 0;
		}

		/* determine the device type being added. */

		if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
			if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
				dc->state = DCS_PT_DEV_ADDR;
				dc->dev_addr_type = ATTO_GDA_AT_PORT;
				dc->curr_phys_id = luevt->wphys_target_id;
			} else {
				esas2r_log(ESAS2R_LOG_WARN,
					   "luevt->dwevent does not have the "
					   "VDAAE_LU_PHYS_ID bit set (%s:%d)",
					   __func__, __LINE__);
			}
		} else {
			dc->raid_grp_name[0] = 0;

			esas2r_targ_db_add_raid(a, dc);
		}

		esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
		esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		esas2r_trace("dwevent: %d", luevt->dwevent);

		esas2r_trace_exit();
	}

	if (dc->state == DCS_DEV_ADD) {
		/* go to the next device. */

		dc->curr_targ++;
	}

	return false;
}

/*
 * When discovery is done, find all requests on the defer queue and
 * test if they need to be modified.  If a target is no longer present,
 * then complete the request with RS_SEL.  Otherwise, update the
 * target_id since after a hibernate it can be a different value.
 * VDA does not make passthrough target IDs persistent.
 */
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
{
	unsigned long flags;
	struct esas2r_target *t;
	struct esas2r_request *rq;
	struct list_head *element;

	/* update virt_targ_id in any outstanding esas2r_requests  */

	spin_lock_irqsave(&a->queue_lock, flags);

	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);
		if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
			t = a->targetdb + rq->target_id;

			if (t->target_state == TS_PRESENT)
				rq->vrq->scsi.target_id =
					le16_to_cpu(t->virt_targ_id);
			else
				rq->req_stat = RS_SEL;
		}
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);
}