/*
 * Aic94xx Task Management Functions
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include "aic94xx.h"
#include "aic94xx_sas.h"
#include "aic94xx_hwi.h"

/* ---------- Internal enqueue ---------- */
static int asd_enqueue_internal(struct asd_ascb *ascb,
				void (*tasklet_complete)(struct asd_ascb *,
							 struct done_list_struct *),
				void (*timed_out)(unsigned long))
{
	int res;

	ascb->tasklet_complete = tasklet_complete;
	ascb->uldd_timer = 1;

	ascb->timer.data = (unsigned long) ascb;
	ascb->timer.function = timed_out;
	ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;

	add_timer(&ascb->timer);

	res = asd_post_ascb_list(ascb->ha, ascb, 1);
	if (unlikely(res))
		del_timer(&ascb->timer);
	return res;
}
/* ---------- CLEAR NEXUS ---------- */

struct tasklet_completion_status {
	int	dl_opcode;
	int	tmf_state;
	u8	tag_valid:1;
	__be16	tag;
};

#define DECLARE_TCS(tcs)		\
	struct tasklet_completion_status tcs = {	\
		.dl_opcode = 0,		\
		.tmf_state = 0,		\
		.tag_valid = 0,		\
		.tag = 0,		\
	}
static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
					     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("%s: here\n", __func__);
	if (!del_timer(&ascb->timer)) {
		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
		return;
	}
	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
	tcs->dl_opcode = dl->opcode;
	complete(ascb->completion);
	asd_ascb_free(ascb);
}

static void asd_clear_nexus_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *)data;
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("%s: here\n", __func__);
	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}
#define CLEAR_NEXUS_PRE		\
	struct asd_ascb *ascb;	\
	struct scb *scb;	\
	int res;		\
	DECLARE_COMPLETION_ONSTACK(completion);	\
	DECLARE_TCS(tcs);	\
				\
	ASD_DPRINTK("%s: PRE\n", __func__);	\
	res = 1;		\
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);	\
	if (!ascb)		\
		return -ENOMEM;	\
				\
	ascb->completion = &completion;	\
	ascb->uldd_task = &tcs;	\
	scb = ascb->scb;	\
	scb->header.opcode = CLEAR_NEXUS

#define CLEAR_NEXUS_POST	\
	ASD_DPRINTK("%s: POST\n", __func__);	\
	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete,	\
				   asd_clear_nexus_timedout);	\
	if (res)		\
		goto out_err;	\
	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__);	\
	wait_for_completion(&completion);	\
	res = tcs.dl_opcode;	\
	if (res == TC_NO_ERROR)	\
		res = TMF_RESP_FUNC_COMPLETE;	\
	return res;		\
out_err:			\
	asd_ascb_free(ascb);	\
	return res
int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_ADAPTER;
	CLEAR_NEXUS_POST;
}

int asd_clear_nexus_port(struct asd_sas_port *port)
{
	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_PORT;
	scb->clear_nexus.conn_mask = port->phy_mask;
	CLEAR_NEXUS_POST;
}
enum clear_nexus_phase {
	NEXUS_PHASE_PRE,
	NEXUS_PHASE_POST,
	NEXUS_PHASE_RESUME,
};

static int asd_clear_nexus_I_T(struct domain_device *dev,
			       enum clear_nexus_phase phase)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T;
	switch (phase) {
	case NEXUS_PHASE_PRE:
		scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
		break;
	case NEXUS_PHASE_POST:
		scb->clear_nexus.flags = SEND_Q | NOTINQ;
		break;
	case NEXUS_PHASE_RESUME:
		scb->clear_nexus.flags = RESUME_TX;
	}
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
int asd_I_T_nexus_reset(struct domain_device *dev)
{
	int res, tmp_res, i;
	struct sas_phy *phy = sas_get_local_phy(dev);
	/* Standard mandates link reset for ATA (type 0) and
	 * hard reset for SSP (type 1) */
	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
	/* send a hard reset */
	ASD_DPRINTK("sending %s reset to %s\n",
		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
	res = sas_phy_reset(phy, reset_type);
	if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
		/* wait for the maximum settle time */
		msleep(500);
		/* clear all outstanding commands (keep nexus suspended) */
		asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
	}

	for (i = 0; i < 3; i++) {
		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
		if (tmp_res == TC_RESUME)
			goto out;
		msleep(500);
	}

	/* This is a bit of a problem: the sequencer is still suspended
	 * and is refusing to resume. Hope it will resume on a bigger hammer
	 * or the disk is lost */
	dev_printk(KERN_ERR, &phy->dev,
		   "Failed to resume nexus after reset 0x%x\n", tmp_res);

	res = TMF_RESP_FUNC_FAILED;
 out:
	sas_put_local_phy(phy);
	return res;
}
static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T_L;
	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_tag(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TAG;
	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
	scb->clear_nexus.ssp_task.tag = tascb->tag;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							   task->dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_index(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							   task->dev->lldd_dev);
	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
	CLEAR_NEXUS_POST;
}
/* ---------- TMFs ---------- */

static void asd_tmf_timedout(unsigned long data)
{
	struct asd_ascb *ascb = (void *) data;
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("tmf timed out\n");
	tcs->tmf_state = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}
static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
				    struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	unsigned long flags;
	struct tc_resp_sb_struct {
		__le16 index_escb;
		u8     len_lsb;
		u8     flags;
	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

	int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
	struct asd_ascb *escb;
	struct asd_dma_tok *edb;
	struct ssp_frame_hdr *fh;
	struct ssp_response_iu *ru;
	int res = TMF_RESP_FUNC_FAILED;

	ASD_DPRINTK("tmf resp tasklet\n");

	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
	escb = asd_tc_index_find(&asd_ha->seq,
				 (int)le16_to_cpu(resp_sb->index_escb));
	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

	if (!escb) {
		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
		return res;
	}

	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
	ascb->tag = *(__be16 *)(edb->vaddr+4);
	fh = edb->vaddr + 16;
	ru = edb->vaddr + 16 + sizeof(*fh);
	res = ru->status;
	if (ru->datapres == 1)	  /* Response data present */
		res = ru->resp_data[3];
#if 0
	ascb->tag = fh->tag;
#endif
	ascb->tag_valid = 1;

	asd_invalidate_edb(escb, edb_id);
	return res;
}
static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
				     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs;

	if (!del_timer(&ascb->timer))
		return;

	tcs = ascb->uldd_task;
	ASD_DPRINTK("tmf tasklet complete\n");

	tcs->dl_opcode = dl->opcode;

	if (dl->opcode == TC_SSP_RESP) {
		tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
		tcs->tag_valid = ascb->tag_valid;
		tcs->tag = ascb->tag;
	}

	complete(ascb->completion);
	asd_ascb_free(ascb);
}
static int asd_clear_nexus(struct sas_task *task)
{
	int res = TMF_RESP_FUNC_FAILED;
	int leftover;
	struct asd_ascb *tascb = task->lldd_task;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;

	tascb->completion = &completion;

	ASD_DPRINTK("task not done, clearing nexus\n");
	if (tascb->tag_valid)
		res = asd_clear_nexus_tag(task);
	else
		res = asd_clear_nexus_index(task);
	leftover = wait_for_completion_timeout(&completion,
					       AIC94XX_SCB_TIMEOUT);
	tascb->completion = NULL;
	ASD_DPRINTK("came back from clear nexus\n");
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (leftover < 1)
		res = TMF_RESP_FUNC_FAILED;
	if (task->task_state_flags & SAS_TASK_STATE_DONE)
		res = TMF_RESP_FUNC_COMPLETE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return res;
}
/**
 * asd_abort_task -- ABORT TASK TMF
 * @task: the task to be aborted
 *
 * Before calling ABORT TASK the task state flags should be ORed with
 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
 *
 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
 * Returns: SAS TMF responses (see sas_task.h),
 *          -ENOMEM,
 *          -SAS_QUEUE_FULL.
 *
 * When ABORT TASK returns, the caller of ABORT TASK checks first the
 * task->task_state_flags, and then the return value of ABORT TASK.
 *
 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
 * task was completed successfully prior to it being aborted. The
 * caller of ABORT TASK has responsibility to call task->task_done()
 * xor free the task, depending on their framework. The return code
 * is TMF_RESP_FUNC_FAILED in this case.
 *
 * Else the SAS_TASK_STATE_DONE bit is not set,
 *	If the return code is TMF_RESP_FUNC_COMPLETE, then
 *		the task was aborted successfully. The caller of
 *		ABORT TASK has responsibility to call task->task_done()
 *		to finish the task, xor free the task depending on their
 *		framework.
 *	else
 *		the ABORT TASK returned some kind of error. The task
 *		was _not_ cancelled. Nothing can be assumed.
 *		The caller of ABORT TASK may wish to retry.
 */
int asd_abort_task(struct sas_task *task)
{
	struct asd_ascb *tascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = tascb->ha;
	int res = 1;
	unsigned long flags;
	struct asd_ascb *ascb = NULL;
	struct scb *scb;
	int leftover;
	DECLARE_TCS(tcs);
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_COMPLETION_ONSTACK(tascb_completion);

	tascb->completion = &tascb_completion;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->uldd_task = &tcs;
	ascb->completion = &completion;
	scb = ascb->scb;
	scb->header.opcode = SCB_ABORT_TASK;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
		break;
	case SAS_PROTOCOL_SSP:
		scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
		scb->abort_task.proto_conn_rate |= task->dev->linkrate;
		break;
	case SAS_PROTOCOL_SMP:
		break;
	default:
		break;
	}

	if (task->task_proto == SAS_PROTOCOL_SSP) {
		scb->abort_task.ssp_frame.frame_type = SSP_TASK;
		memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
		       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
		memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
		       task->dev->port->ha->hashed_sas_addr,
		       HASHED_SAS_ADDR_SIZE);
		scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

		memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
		scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
		scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
	}

	scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
	scb->abort_task.conn_handle = cpu_to_le16(
		(u16)(unsigned long)task->dev->lldd_dev);
	scb->abort_task.retry_count = 1;
	scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
	scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_free;
	wait_for_completion(&completion);
	ASD_DPRINTK("tmf came back\n");

	tascb->tag = tcs.tag;
	tascb->tag_valid = tcs.tag_valid;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (tcs.dl_opcode == TC_SSP_RESP) {
		/* The task to be aborted has been sent to the device.
		 * We got a Response IU for the ABORT TASK TMF. */
		if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
			res = asd_clear_nexus(task);
		else
			res = tcs.tmf_state;
	} else if (tcs.dl_opcode == TC_NO_ERROR &&
		   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
		/* timeout */
		res = TMF_RESP_FUNC_FAILED;
	} else {
		/* In the following we assume that the managing layer
		 * will _never_ make a mistake, when issuing ABORT
		 * TASK.
		 */
		switch (tcs.dl_opcode) {
		default:
			res = asd_clear_nexus(task);
			/* fallthrough */
		case TC_NO_ERROR:
			break;
			/* The task hasn't been sent to the device xor
			 * we never got a (sane) Response IU for the
			 * ABORT TASK TMF.
			 */
		case TF_NAK_RECV:
			res = TMF_RESP_INVALID_FRAME;
			break;
		case TF_TMF_TASK_DONE:	/* done but not reported yet */
			res = TMF_RESP_FUNC_FAILED;
			leftover =
				wait_for_completion_timeout(&tascb_completion,
							    AIC94XX_SCB_TIMEOUT);
			spin_lock_irqsave(&task->task_state_lock, flags);
			if (leftover < 1)
				res = TMF_RESP_FUNC_FAILED;
			if (task->task_state_flags & SAS_TASK_STATE_DONE)
				res = TMF_RESP_FUNC_COMPLETE;
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			break;
		case TF_TMF_NO_TAG:
		case TF_TMF_TAG_FREE:	/* the tag is in the free list */
		case TF_TMF_NO_CONN_HANDLE:	/* no such device */
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		case TF_TMF_NO_CTX:	/* not in seq, or proto != SSP */
			res = TMF_RESP_FUNC_ESUPP;
			break;
		}
	}
 out_done:
	tascb->completion = NULL;
	if (res == TMF_RESP_FUNC_COMPLETE) {
		task->lldd_task = NULL;
		mb();
		asd_ascb_free(tascb);
	}
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;

 out_free:
	asd_ascb_free(ascb);
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;
}
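
/*
 * Illustrative sketch only (not part of the driver, kept out of the build
 * like the other #if 0 block in this file): a minimal example of the
 * caller-side protocol described in the asd_abort_task() kernel-doc above.
 * The helper name and the surrounding error handling are hypothetical
 * assumptions; only the flag manipulation under task_state_lock and the
 * interpretation of the return code follow the comment above.
 */
#if 0
static int example_abort_one_task(struct sas_task *task)
{
	unsigned long flags;
	int res;

	/* Mark the task aborted under the IRQ spinlock, unless it has
	 * already completed, then issue ABORT TASK. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	res = asd_abort_task(task);

	/* Check task_state_flags first, then the TMF response: a task that
	 * completed before the abort, or one that was aborted successfully,
	 * must be finished (xor freed) by the caller. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
	    res == TMF_RESP_FUNC_COMPLETE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		task->task_done(task);
		return 0;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return res;	/* not cancelled; the caller may wish to retry */
}
#endif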
/**
 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
 * @dev: pointer to struct domain_device of interest
 * @lun: pointer to u8[8] which is the LUN
 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
 * @index: the transaction context of the task to be queried if QT TMF
 *
 * This function is used to send ABORT TASK SET, CLEAR ACA,
 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
 *
 * No SCBs should be queued to the I_T_L nexus when this SCB is
 * pending.
 *
 * Returns: TMF response code (see sas_task.h or the SAS spec)
 */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
				int tmf, int index)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	struct asd_ascb *ascb;
	int res = 1;
	struct scb *scb;
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_TCS(tcs);

	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->completion = &completion;
	ascb->uldd_task = &tcs;
	scb = ascb->scb;

	if (tmf == TMF_QUERY_TASK)
		scb->header.opcode = QUERY_SSP_TASK;
	else
		scb->header.opcode = INITIATE_SSP_TMF;

	scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
	/* SSP frame header */
	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
	/* SSP Task IU */
	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
	scb->ssp_tmf.ssp_task.tmf = tmf;

	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
	scb->ssp_tmf.conn_handle = cpu_to_le16((u16)(unsigned long)
					       dev->lldd_dev);
	scb->ssp_tmf.retry_count = 1;
	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
	if (tmf == TMF_QUERY_TASK)
		scb->ssp_tmf.index = cpu_to_le16(index);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_err;
	wait_for_completion(&completion);

	switch (tcs.dl_opcode) {
	case TC_NO_ERROR:
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_NAK_RECV:
		res = TMF_RESP_INVALID_FRAME;
		break;
	case TF_TMF_TASK_DONE:
		res = TMF_RESP_FUNC_FAILED;
		break;
	case TF_TMF_NO_TAG:
	case TF_TMF_TAG_FREE:	/* the tag is in the free list */
	case TF_TMF_NO_CONN_HANDLE:	/* no such device */
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_TMF_NO_CTX:	/* not in seq, or proto != SSP */
		res = TMF_RESP_FUNC_ESUPP;
		break;
	default:
		/* Allow TMF response codes to propagate upwards */
		res = tcs.dl_opcode;
		break;
	}
	return res;
out_err:
	asd_ascb_free(ascb);
	return res;
}
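
/*
 * Illustrative sketch only (not compiled): one way a caller might build the
 * u8[8] LUN argument that asd_initiate_ssp_tmf() and the wrappers below
 * expect. The use of the SCSI midlayer's struct scsi_lun and
 * int_to_scsilun() is an assumption about the caller, not something this
 * driver requires; the helper name is hypothetical.
 */
#if 0
static int example_lu_reset(struct domain_device *dev, u64 lun)
{
	struct scsi_lun scsi_lun;

	/* Convert the numeric LUN into the 8-byte SAM LUN format that the
	 * SSP Task IU carries, then send LU RESET to the I_T_L nexus. */
	int_to_scsilun(lun, &scsi_lun);
	return asd_initiate_ssp_tmf(dev, scsi_lun.scsi_lun, TMF_LU_RESET, 0);
}
#endif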
int asd_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_clear_aca(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_lu_reset(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}
/**
 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
 * @task: pointer to sas_task struct of interest
 *
 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
 *
 * Normally the management layer sets the task to aborted state,
 * and then calls query task and then abort task.
 */
int asd_query_task(struct sas_task *task)
{
	struct asd_ascb *ascb = task->lldd_task;
	int index;

	if (ascb) {
		index = ascb->tc_index;
		return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
					    TMF_QUERY_TASK, index);
	}
	return TMF_RESP_FUNC_COMPLETE;
}
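
/*
 * Illustrative sketch only (not compiled): the query-then-abort flow the
 * asd_query_task() kernel-doc describes, assuming the task has already been
 * marked aborted under task_state_lock as noted above. The helper name and
 * the surrounding error-handling policy are hypothetical assumptions.
 */
#if 0
static int example_query_then_abort(struct sas_task *task)
{
	int res = asd_query_task(task);

	if (res == TMF_RESP_FUNC_COMPLETE)
		return 0;			/* no longer in the task set */
	if (res == TMF_RESP_FUNC_SUCC)
		return asd_abort_task(task);	/* still held by the target */
	return res;				/* the query itself failed */
}
#endif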