aic94xx_tmf.c

/*
 * Aic94xx Task Management Functions
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include "aic94xx.h"
#include "aic94xx_sas.h"
#include "aic94xx_hwi.h"

/* ---------- Internal enqueue ---------- */
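/* The helper below arms the ascb's ULDD timer before posting the SCB, so
 * either the done-list tasklet or the timeout handler is guaranteed to run
 * and complete the waiter; if posting fails, the timer is deleted again and
 * the error is returned to the caller, who still owns the ascb.
 */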
static int asd_enqueue_internal(struct asd_ascb *ascb,
                                void (*tasklet_complete)(struct asd_ascb *,
                                                         struct done_list_struct *),
                                void (*timed_out)(unsigned long))
{
        int res;

        ascb->tasklet_complete = tasklet_complete;
        ascb->uldd_timer = 1;

        ascb->timer.data = (unsigned long) ascb;
        ascb->timer.function = timed_out;
        ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;

        add_timer(&ascb->timer);

        res = asd_post_ascb_list(ascb->ha, ascb, 1);
        if (unlikely(res))
                del_timer(&ascb->timer);
        return res;
}

/* ---------- CLEAR NEXUS ---------- */
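/* The completion status is shared, via ascb->uldd_task, between the thread
 * that posts an SCB and the tasklet/timeout handlers that complete it:
 * dl_opcode records the done-list opcode (or a TMF_RESP_* code on timeout),
 * tmf_state the decoded TMF response, and tag/tag_valid the tag returned in
 * an SSP RESPONSE frame.
 */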
struct tasklet_completion_status {
        int     dl_opcode;
        int     tmf_state;
        u8      tag_valid:1;
        __be16  tag;
};

#define DECLARE_TCS(tcs) \
        struct tasklet_completion_status tcs = { \
                .dl_opcode = 0, \
                .tmf_state = 0, \
                .tag_valid = 0, \
                .tag = 0, \
        }

static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
                                             struct done_list_struct *dl)
{
        struct tasklet_completion_status *tcs = ascb->uldd_task;

        ASD_DPRINTK("%s: here\n", __func__);
        if (!del_timer(&ascb->timer)) {
                ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
                return;
        }
        ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
        tcs->dl_opcode = dl->opcode;
        complete(ascb->completion);
        asd_ascb_free(ascb);
}

static void asd_clear_nexus_timedout(unsigned long data)
{
        struct asd_ascb *ascb = (void *)data;
        struct tasklet_completion_status *tcs = ascb->uldd_task;

        ASD_DPRINTK("%s: here\n", __func__);
        tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
        complete(ascb->completion);
}
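
/* The CLEAR NEXUS helpers all follow the same pattern: CLEAR_NEXUS_PRE
 * allocates an ascb, hooks up the on-stack completion and status, and
 * prepares a CLEAR NEXUS SCB; the caller then fills in the nexus scope
 * (adapter, port, I_T, I_T_L, tag or transaction index); CLEAR_NEXUS_POST
 * posts the SCB, waits for it, and maps TC_NO_ERROR to
 * TMF_RESP_FUNC_COMPLETE.
 */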
#define CLEAR_NEXUS_PRE \
        struct asd_ascb *ascb; \
        struct scb *scb; \
        int res; \
        DECLARE_COMPLETION_ONSTACK(completion); \
        DECLARE_TCS(tcs); \
        \
        ASD_DPRINTK("%s: PRE\n", __func__); \
        res = 1; \
        ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
        if (!ascb) \
                return -ENOMEM; \
        \
        ascb->completion = &completion; \
        ascb->uldd_task = &tcs; \
        scb = ascb->scb; \
        scb->header.opcode = CLEAR_NEXUS

#define CLEAR_NEXUS_POST \
        ASD_DPRINTK("%s: POST\n", __func__); \
        res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
                                   asd_clear_nexus_timedout); \
        if (res) \
                goto out_err; \
        ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
        wait_for_completion(&completion); \
        res = tcs.dl_opcode; \
        if (res == TC_NO_ERROR) \
                res = TMF_RESP_FUNC_COMPLETE; \
        return res; \
out_err: \
        asd_ascb_free(ascb); \
        return res

int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
        struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_ADAPTER;
        CLEAR_NEXUS_POST;
}

int asd_clear_nexus_port(struct asd_sas_port *port)
{
        struct asd_ha_struct *asd_ha = port->ha->lldd_ha;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_PORT;
        scb->clear_nexus.conn_mask = port->phy_mask;
        CLEAR_NEXUS_POST;
}

enum clear_nexus_phase {
        NEXUS_PHASE_PRE,
        NEXUS_PHASE_POST,
        NEXUS_PHASE_RESUME,
};
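
/* The three phases map onto asd_I_T_nexus_reset() below: PRE suspends
 * transmission while clearing the execution queue before the phy reset,
 * POST clears the send queue (and tasks not in any queue) once the reset
 * has completed, and RESUME restarts transmission afterwards.
 */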
static int asd_clear_nexus_I_T(struct domain_device *dev,
                               enum clear_nexus_phase phase)
{
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_I_T;
        switch (phase) {
        case NEXUS_PHASE_PRE:
                scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
                break;
        case NEXUS_PHASE_POST:
                scb->clear_nexus.flags = SEND_Q | NOTINQ;
                break;
        case NEXUS_PHASE_RESUME:
                scb->clear_nexus.flags = RESUME_TX;
        }
        scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                   dev->lldd_dev);
        CLEAR_NEXUS_POST;
}
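
/* I_T nexus reset sequence: suspend the nexus and clear the execution queue,
 * reset the phy (hard reset for SSP, link reset for SATA/STP), wait the
 * settle time, clear the remaining outstanding commands while the nexus is
 * still suspended, and finally resume transmission, retrying the resume up
 * to three times.
 */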
int asd_I_T_nexus_reset(struct domain_device *dev)
{
        int res, tmp_res, i;
        struct sas_phy *phy = sas_find_local_phy(dev);
        /* Standard mandates link reset for ATA (type 0) and
         * hard reset for SSP (type 1) */
        int reset_type = (dev->dev_type == SATA_DEV ||
                          (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

        asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
        /* send a hard reset */
        ASD_DPRINTK("sending %s reset to %s\n",
                    reset_type ? "hard" : "soft", dev_name(&phy->dev));
        res = sas_phy_reset(phy, reset_type);
        if (res == TMF_RESP_FUNC_COMPLETE) {
                /* wait for the maximum settle time */
                msleep(500);
                /* clear all outstanding commands (keep nexus suspended) */
                asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
        }

        for (i = 0; i < 3; i++) {
                tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
                if (tmp_res == TC_RESUME)
                        return res;
                msleep(500);
        }

        /* This is a bit of a problem: the sequencer is still suspended
         * and is refusing to resume. Hope it will resume on a bigger hammer
         * or the disk is lost */
        dev_printk(KERN_ERR, &phy->dev,
                   "Failed to resume nexus after reset 0x%x\n", tmp_res);

        return TMF_RESP_FUNC_FAILED;
}
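
/* Clear everything queued to one LUN of a device; used by the ABORT TASK SET,
 * CLEAR ACA, CLEAR TASK SET and LU RESET wrappers at the bottom of this file
 * once the corresponding TMF has completed.
 */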
static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_I_T_L;
        scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
        memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
        scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                   dev->lldd_dev);
        CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_tag(struct sas_task *task)
{
        struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
        struct asd_ascb *tascb = task->lldd_task;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_TAG;
        memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
        scb->clear_nexus.ssp_task.tag = tascb->tag;
        if (task->dev->tproto)
                scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                           task->dev->lldd_dev);
        CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_index(struct sas_task *task)
{
        struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
        struct asd_ascb *tascb = task->lldd_task;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_TRANS_CX;
        if (task->dev->tproto)
                scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                           task->dev->lldd_dev);
        scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
        CLEAR_NEXUS_POST;
}

/* ---------- TMFs ---------- */

static void asd_tmf_timedout(unsigned long data)
{
        struct asd_ascb *ascb = (void *) data;
        struct tasklet_completion_status *tcs = ascb->uldd_task;

        ASD_DPRINTK("tmf timed out\n");
        tcs->tmf_state = TMF_RESP_FUNC_FAILED;
        complete(ascb->completion);
}
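
/* On a timeout only tmf_state is forced to TMF_RESP_FUNC_FAILED; dl_opcode
 * keeps its zero initial value (TC_NO_ERROR), which asd_abort_task() below
 * relies on to tell a timed-out TMF from a real completion.  When a real
 * SSP RESPONSE does arrive, asd_get_tmf_resp_tasklet() digs it out of the
 * empty data buffer referenced by the done-list status block.
 */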
static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
                                    struct done_list_struct *dl)
{
        struct asd_ha_struct *asd_ha = ascb->ha;
        unsigned long flags;
        struct tc_resp_sb_struct {
                __le16 index_escb;
                u8     len_lsb;
                u8     flags;
        } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

        int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
        struct asd_ascb *escb;
        struct asd_dma_tok *edb;
        struct ssp_frame_hdr *fh;
        struct ssp_response_iu *ru;
        int res = TMF_RESP_FUNC_FAILED;

        ASD_DPRINTK("tmf resp tasklet\n");

        spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
        escb = asd_tc_index_find(&asd_ha->seq,
                                 (int)le16_to_cpu(resp_sb->index_escb));
        spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

        if (!escb) {
                ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
                return res;
        }

        edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
        ascb->tag = *(__be16 *)(edb->vaddr+4);
        fh = edb->vaddr + 16;
        ru = edb->vaddr + 16 + sizeof(*fh);
        res = ru->status;
        if (ru->datapres == 1)	  /* Response data present */
                res = ru->resp_data[3];
#if 0
        ascb->tag = fh->tag;
#endif
        ascb->tag_valid = 1;

        asd_invalidate_edb(escb, edb_id);
        return res;
}

static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
                                     struct done_list_struct *dl)
{
        struct tasklet_completion_status *tcs;

        if (!del_timer(&ascb->timer))
                return;

        tcs = ascb->uldd_task;
        ASD_DPRINTK("tmf tasklet complete\n");

        tcs->dl_opcode = dl->opcode;

        if (dl->opcode == TC_SSP_RESP) {
                tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
                tcs->tag_valid = ascb->tag_valid;
                tcs->tag = ascb->tag;
        }

        complete(ascb->completion);
        asd_ascb_free(ascb);
}
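
/* Called when ABORT TASK got a response but the aborted task itself has not
 * completed: tear down the task context in the sequencer, by tag if a valid
 * tag was returned, otherwise by transaction context index, then give the
 * task a short window to signal SAS_TASK_STATE_DONE.
 */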
static int asd_clear_nexus(struct sas_task *task)
{
        int res = TMF_RESP_FUNC_FAILED;
        int leftover;
        struct asd_ascb *tascb = task->lldd_task;
        DECLARE_COMPLETION_ONSTACK(completion);
        unsigned long flags;

        tascb->completion = &completion;

        ASD_DPRINTK("task not done, clearing nexus\n");
        if (tascb->tag_valid)
                res = asd_clear_nexus_tag(task);
        else
                res = asd_clear_nexus_index(task);
        leftover = wait_for_completion_timeout(&completion,
                                               AIC94XX_SCB_TIMEOUT);
        tascb->completion = NULL;
        ASD_DPRINTK("came back from clear nexus\n");
        spin_lock_irqsave(&task->task_state_lock, flags);
        if (leftover < 1)
                res = TMF_RESP_FUNC_FAILED;
        if (task->task_state_flags & SAS_TASK_STATE_DONE)
                res = TMF_RESP_FUNC_COMPLETE;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        return res;
}

/**
 * asd_abort_task -- ABORT TASK TMF
 * @task: the task to be aborted
 *
 * Before calling ABORT TASK the task state flags should be ORed with
 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
 *
 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
 * Returns: SAS TMF responses (see sas_task.h),
 *          -ENOMEM,
 *          -SAS_QUEUE_FULL.
 *
 * When ABORT TASK returns, the caller of ABORT TASK checks first the
 * task->task_state_flags, and then the return value of ABORT TASK.
 *
 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
 * task was completed successfully prior to it being aborted.  The
 * caller of ABORT TASK has responsibility to call task->task_done()
 * xor free the task, depending on their framework.  The return code
 * is TMF_RESP_FUNC_FAILED in this case.
 *
 * Else the SAS_TASK_STATE_DONE bit is not set,
 *      If the return code is TMF_RESP_FUNC_COMPLETE, then
 *              the task was aborted successfully.  The caller of
 *              ABORT TASK has responsibility to call task->task_done()
 *              to finish the task, xor free the task depending on their
 *              framework.
 *      else
 *              the ABORT TASK returned some kind of error.  The task
 *              was _not_ cancelled.  Nothing can be assumed.
 *              The caller of ABORT TASK may wish to retry.
 */
int asd_abort_task(struct sas_task *task)
{
        struct asd_ascb *tascb = task->lldd_task;
        struct asd_ha_struct *asd_ha = tascb->ha;
        int res = 1;
        unsigned long flags;
        struct asd_ascb *ascb = NULL;
        struct scb *scb;
        int leftover;
        DECLARE_TCS(tcs);
        DECLARE_COMPLETION_ONSTACK(completion);
        DECLARE_COMPLETION_ONSTACK(tascb_completion);

        tascb->completion = &tascb_completion;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                res = TMF_RESP_FUNC_COMPLETE;
                ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
                goto out_done;
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
        if (!ascb)
                return -ENOMEM;

        ascb->uldd_task = &tcs;
        ascb->completion = &completion;
        scb = ascb->scb;
        scb->header.opcode = SCB_ABORT_TASK;

        switch (task->task_proto) {
        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
                scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
                break;
        case SAS_PROTOCOL_SSP:
                scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
                scb->abort_task.proto_conn_rate |= task->dev->linkrate;
                break;
        case SAS_PROTOCOL_SMP:
                break;
        default:
                break;
        }

        if (task->task_proto == SAS_PROTOCOL_SSP) {
                scb->abort_task.ssp_frame.frame_type = SSP_TASK;
                memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
                       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
                memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
                       task->dev->port->ha->hashed_sas_addr,
                       HASHED_SAS_ADDR_SIZE);
                scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

                memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
                scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
                scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
        }

        scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
        scb->abort_task.conn_handle = cpu_to_le16(
                (u16)(unsigned long)task->dev->lldd_dev);
        scb->abort_task.retry_count = 1;
        scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
        scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);

        res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
                                   asd_tmf_timedout);
        if (res)
                goto out_free;
        wait_for_completion(&completion);
        ASD_DPRINTK("tmf came back\n");

        tascb->tag = tcs.tag;
        tascb->tag_valid = tcs.tag_valid;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                res = TMF_RESP_FUNC_COMPLETE;
                ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
                goto out_done;
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        if (tcs.dl_opcode == TC_SSP_RESP) {
                /* The task to be aborted has been sent to the device.
                 * We got a Response IU for the ABORT TASK TMF. */
                if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
                        res = asd_clear_nexus(task);
                else
                        res = tcs.tmf_state;
        } else if (tcs.dl_opcode == TC_NO_ERROR &&
                   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
                /* timeout */
                res = TMF_RESP_FUNC_FAILED;
        } else {
                /* In the following we assume that the managing layer
                 * will _never_ make a mistake, when issuing ABORT
                 * TASK.
                 */
                switch (tcs.dl_opcode) {
                default:
                        res = asd_clear_nexus(task);
                        /* fallthrough */
                case TC_NO_ERROR:
                        break;
                        /* The task hasn't been sent to the device xor
                         * we never got a (sane) Response IU for the
                         * ABORT TASK TMF.
                         */
                case TF_NAK_RECV:
                        res = TMF_RESP_INVALID_FRAME;
                        break;
                case TF_TMF_TASK_DONE:  /* done but not reported yet */
                        res = TMF_RESP_FUNC_FAILED;
                        leftover =
                                wait_for_completion_timeout(&tascb_completion,
                                                            AIC94XX_SCB_TIMEOUT);
                        spin_lock_irqsave(&task->task_state_lock, flags);
                        if (leftover < 1)
                                res = TMF_RESP_FUNC_FAILED;
                        if (task->task_state_flags & SAS_TASK_STATE_DONE)
                                res = TMF_RESP_FUNC_COMPLETE;
                        spin_unlock_irqrestore(&task->task_state_lock, flags);
                        break;
                case TF_TMF_NO_TAG:
                case TF_TMF_TAG_FREE: /* the tag is in the free list */
                case TF_TMF_NO_CONN_HANDLE: /* no such device */
                        res = TMF_RESP_FUNC_COMPLETE;
                        break;
                case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
                        res = TMF_RESP_FUNC_ESUPP;
                        break;
                }
        }
 out_done:
        tascb->completion = NULL;
        if (res == TMF_RESP_FUNC_COMPLETE) {
                task->lldd_task = NULL;
                mb();
                asd_ascb_free(tascb);
        }
        ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
        return res;

 out_free:
        asd_ascb_free(ascb);
        ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
        return res;
}
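
/*
 * Illustrative caller pattern (a sketch only, not part of this driver): per
 * the kernel-doc above, a managing layer such as libsas is expected to mark
 * the task aborted under task_state_lock before invoking the TMF, and to
 * check SAS_TASK_STATE_DONE again before trusting the return code:
 *
 *      spin_lock_irqsave(&task->task_state_lock, flags);
 *      if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
 *              task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 *      spin_unlock_irqrestore(&task->task_state_lock, flags);
 *
 *      res = asd_abort_task(task);
 *      if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
 *          res == TMF_RESP_FUNC_COMPLETE)
 *              ... call task->task_done() xor free the task, as the
 *                  framework requires ...
 */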

/**
 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
 * @dev: pointer to struct domain_device of interest
 * @lun: pointer to u8[8] which is the LUN
 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
 * @index: the transaction context of the task to be queried if QT TMF
 *
 * This function is used to send ABORT TASK SET, CLEAR ACA,
 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
 *
 * No SCBs should be queued to the I_T_L nexus when this SCB is
 * pending.
 *
 * Returns: TMF response code (see sas_task.h or the SAS spec)
 */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
                                int tmf, int index)
{
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
        struct asd_ascb *ascb;
        int res = 1;
        struct scb *scb;
        DECLARE_COMPLETION_ONSTACK(completion);
        DECLARE_TCS(tcs);

        if (!(dev->tproto & SAS_PROTOCOL_SSP))
                return TMF_RESP_FUNC_ESUPP;

        ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
        if (!ascb)
                return -ENOMEM;

        ascb->completion = &completion;
        ascb->uldd_task = &tcs;
        scb = ascb->scb;

        if (tmf == TMF_QUERY_TASK)
                scb->header.opcode = QUERY_SSP_TASK;
        else
                scb->header.opcode = INITIATE_SSP_TMF;

        scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
        scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
        /* SSP frame header */
        scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
        memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
               dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
        memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
               dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
        scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
        /* SSP Task IU */
        memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
        scb->ssp_tmf.ssp_task.tmf = tmf;

        scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
        scb->ssp_tmf.conn_handle = cpu_to_le16((u16)(unsigned long)
                                               dev->lldd_dev);
        scb->ssp_tmf.retry_count = 1;
        scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
        if (tmf == TMF_QUERY_TASK)
                scb->ssp_tmf.index = cpu_to_le16(index);

        res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
                                   asd_tmf_timedout);
        if (res)
                goto out_err;
        wait_for_completion(&completion);

        switch (tcs.dl_opcode) {
        case TC_NO_ERROR:
                res = TMF_RESP_FUNC_COMPLETE;
                break;
        case TF_NAK_RECV:
                res = TMF_RESP_INVALID_FRAME;
                break;
        case TF_TMF_TASK_DONE:
                res = TMF_RESP_FUNC_FAILED;
                break;
        case TF_TMF_NO_TAG:
        case TF_TMF_TAG_FREE: /* the tag is in the free list */
        case TF_TMF_NO_CONN_HANDLE: /* no such device */
                res = TMF_RESP_FUNC_COMPLETE;
                break;
        case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
                res = TMF_RESP_FUNC_ESUPP;
                break;
        default:
                /* Allow TMF response codes to propagate upwards */
                res = tcs.dl_opcode;
                break;
        }
        return res;

out_err:
        asd_ascb_free(ascb);
        return res;
}
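
/* The wrappers below issue the respective TMF and, on TMF_RESP_FUNC_COMPLETE,
 * clear the I_T_L nexus to drop any task contexts still held for that LUN.
 */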
int asd_abort_task_set(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

int asd_clear_aca(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

int asd_lu_reset(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

/**
 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
 * @task: pointer to sas_task struct of interest
 *
 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
 *
 * Normally the management layer sets the task to aborted state,
 * and then calls query task and then abort task.
 */
int asd_query_task(struct sas_task *task)
{
        struct asd_ascb *ascb = task->lldd_task;
        int index;

        if (ascb) {
                index = ascb->tc_index;
                return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
                                            TMF_QUERY_TASK, index);
        }
        return TMF_RESP_FUNC_COMPLETE;
}
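
/*
 * A sketch of how these entry points are presumably wired up (the actual
 * registration lives elsewhere in the driver, likely aic94xx_init.c, and may
 * differ in detail):
 *
 *      static struct sas_domain_function_template aic94xx_transport_functions = {
 *              .lldd_abort_task        = asd_abort_task,
 *              .lldd_abort_task_set    = asd_abort_task_set,
 *              .lldd_clear_aca         = asd_clear_aca,
 *              .lldd_clear_task_set    = asd_clear_task_set,
 *              .lldd_I_T_nexus_reset   = asd_I_T_nexus_reset,
 *              .lldd_lu_reset          = asd_lu_reset,
 *              .lldd_query_task        = asd_query_task,
 *              .lldd_clear_nexus_port  = asd_clear_nexus_port,
 *              .lldd_clear_nexus_ha    = asd_clear_nexus_ha,
 *      };
 */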