qedi_fw_api.c

/* QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include "qedi_hsi.h"
#include <linux/qed/qed_if.h>
#include "qedi_fw_iscsi.h"
#include "qedi_fw_scsi.h"

#define SCSI_NUM_SGES_IN_CACHE 0x4
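
/* An SGL takes the firmware slow path when it has more than
 * SCSI_NUM_SGES_SLOW_SGL_THR entries and contains small middle SGEs.
 */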
static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
{
        return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
}
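
/* Program the SGL parameters (address, total length, SGE count) into the
 * task context and copy up to SCSI_NUM_SGES_IN_CACHE SGEs into the cached
 * SGE area.
 */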
static
void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
                           struct scsi_cached_sges *ctx_data_desc,
                           struct scsi_sgl_task_params *sgl_task_params)
{
        u8 sge_index;
        u8 num_sges;
        u32 val;

        num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
                   SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;

        /* sgl params */
        val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
        ctx_sgl_params->sgl_addr.lo = val;
        val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
        ctx_sgl_params->sgl_addr.hi = val;
        val = cpu_to_le32(sgl_task_params->total_buffer_size);
        ctx_sgl_params->sgl_total_length = val;
        ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);

        for (sge_index = 0; sge_index < num_sges; sge_index++) {
                val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
                ctx_data_desc->sge[sge_index].sge_addr.lo = val;
                val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
                ctx_data_desc->sge[sge_index].sge_addr.hi = val;
                val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
                ctx_data_desc->sge[sge_index].sge_len = val;
        }
}
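
/* Return the I/O size for a read/write task: the TX size for initiator
 * writes and target reads, the RX size otherwise.  When DIF is carried on
 * the wire, the total SGL buffer size is used instead.
 */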
static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
                             enum iscsi_task_type task_type,
                             struct scsi_sgl_task_params *sgl_task_params,
                             struct scsi_dif_task_params *dif_task_params)
{
        u32 io_size;

        if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
            task_type == ISCSI_TASK_TYPE_TARGET_READ)
                io_size = task_params->tx_io_size;
        else
                io_size = task_params->rx_io_size;

        if (!io_size)
                return 0;

        if (!dif_task_params)
                return io_size;

        return !dif_task_params->dif_on_network ?
               io_size : sgl_task_params->total_buffer_size;
}
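
/* Fill the DIF flags (protection-interval size, DIF-to-peer and
 * host-interface bits) from the DIF task parameters, if any.
 */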
static void
init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
                       struct scsi_dif_task_params *dif_task_params)
{
        if (!dif_task_params)
                return;

        SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
                  dif_task_params->dif_block_size_log);
        SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
                  dif_task_params->dif_on_network ? 1 : 0);
        SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
                  dif_task_params->dif_on_host ? 1 : 0);
}
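
/* Build the SQ work-queue entry for a task: mark cleanup WQEs, and for
 * normal and middle-path WQEs program the WQE type, SGE count (or the
 * slow-IO sentinel), continuation length, extended-CDB size and, for
 * login/middle-path tasks, the RESPONSE bit based on whether the expected
 * reply advances StatSN.
 */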
static void init_sqe(struct iscsi_task_params *task_params,
                     struct scsi_sgl_task_params *sgl_task_params,
                     struct scsi_dif_task_params *dif_task_params,
                     struct iscsi_common_hdr *pdu_header,
                     struct scsi_initiator_cmd_params *cmd_params,
                     enum iscsi_task_type task_type,
                     bool is_cleanup)
{
        if (!task_params->sqe)
                return;

        memset(task_params->sqe, 0, sizeof(*task_params->sqe));
        task_params->sqe->task_id = cpu_to_le16(task_params->itid);
        if (is_cleanup) {
                SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
                          ISCSI_WQE_TYPE_TASK_CLEANUP);
                return;
        }

        switch (task_type) {
        case ISCSI_TASK_TYPE_INITIATOR_WRITE:
        {
                u32 buf_size = 0;
                u32 num_sges = 0;

                init_dif_context_flags(&task_params->sqe->prot_flags,
                                       dif_task_params);
                SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
                          ISCSI_WQE_TYPE_NORMAL);
                if (task_params->tx_io_size) {
                        buf_size = calc_rw_task_size(task_params, task_type,
                                                     sgl_task_params,
                                                     dif_task_params);

                        if (scsi_is_slow_sgl(sgl_task_params->num_sges,
                                             sgl_task_params->small_mid_sge))
                                num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
                        else
                                num_sges = min(sgl_task_params->num_sges,
                                               (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
                }

                SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
                          num_sges);
                SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
                          buf_size);

                if (GET_FIELD(pdu_header->hdr_second_dword,
                              ISCSI_CMD_HDR_TOTAL_AHS_LEN))
                        SET_FIELD(task_params->sqe->contlen_cdbsize,
                                  ISCSI_WQE_CDB_SIZE,
                                  cmd_params->extended_cdb_sge.sge_len);
        }
                break;
        case ISCSI_TASK_TYPE_INITIATOR_READ:
                SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
                          ISCSI_WQE_TYPE_NORMAL);

                if (GET_FIELD(pdu_header->hdr_second_dword,
                              ISCSI_CMD_HDR_TOTAL_AHS_LEN))
                        SET_FIELD(task_params->sqe->contlen_cdbsize,
                                  ISCSI_WQE_CDB_SIZE,
                                  cmd_params->extended_cdb_sge.sge_len);
                break;
        case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
        case ISCSI_TASK_TYPE_MIDPATH:
        {
                bool advance_statsn = true;

                if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
                        SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
                                  ISCSI_WQE_TYPE_LOGIN);
                else
                        SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
                                  ISCSI_WQE_TYPE_MIDDLE_PATH);

                if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
                        u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
                                              ISCSI_COMMON_HDR_OPCODE);

                        if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
                            (opcode != ISCSI_OPCODE_NOP_IN ||
                             pdu_header->itt == ISCSI_TTT_ALL_ONES))
                                advance_statsn = false;
                }

                SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
                          advance_statsn ? 1 : 0);

                if (task_params->tx_io_size) {
                        SET_FIELD(task_params->sqe->contlen_cdbsize,
                                  ISCSI_WQE_CONT_LEN, task_params->tx_io_size);

                        if (scsi_is_slow_sgl(sgl_task_params->num_sges,
                                             sgl_task_params->small_mid_sge))
                                SET_FIELD(task_params->sqe->flags,
                                          ISCSI_WQE_NUM_SGES,
                                          ISCSI_WQE_NUM_SGES_SLOWIO);
                        else
                                SET_FIELD(task_params->sqe->flags,
                                          ISCSI_WQE_NUM_SGES,
                                          min(sgl_task_params->num_sges,
                                              (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
                }
        }
                break;
        default:
                break;
        }
}
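
/* Reset the task context (preserving the CDU validation byte), copy the PDU
 * header into the Ystorm context and program the task type, connection CID
 * and CQ/RSS number.
 */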
static void init_default_iscsi_task(struct iscsi_task_params *task_params,
                                    struct data_hdr *pdu_header,
                                    enum iscsi_task_type task_type)
{
        struct e4_iscsi_task_context *context;
        u32 val;
        u16 index;
        u8 val_byte;

        context = task_params->context;
        val_byte = context->mstorm_ag_context.cdu_validation;
        memset(context, 0, sizeof(*context));
        context->mstorm_ag_context.cdu_validation = val_byte;

        for (index = 0; index <
             ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
             index++) {
                val = cpu_to_le32(pdu_header->data[index]);
                context->ystorm_st_context.pdu_hdr.data.data[index] = val;
        }

        context->mstorm_st_context.task_type = task_type;
        context->mstorm_ag_context.task_cid =
                cpu_to_le16(task_params->conn_icid);

        SET_FIELD(context->ustorm_ag_context.flags1,
                  E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);

        context->ustorm_st_context.task_type = task_type;
        context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
        context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
}
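
/* When an extended CDB is used, program its size and SGE (address and
 * length) into the Ystorm PDU header.
 */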
static
void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
                                          struct scsi_initiator_cmd_params *cmd)
{
        union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
        u32 val;

        if (!cmd->extended_cdb_sge.sge_len)
                return;

        SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
                  ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
                  cmd->extended_cdb_sge.sge_len);
        val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
        ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
        val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
        ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
        val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
        ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val;
}
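
/* Program the remaining receive length, expected data transfer length, SGE
 * count and the DIF-error completion-flag enable into the Ustorm contexts.
 */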
static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
                               struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
                               u32 remaining_recv_len,
                               u32 expected_data_transfer_len,
                               u8 num_sges, bool tx_dif_conn_err_en)
{
        u32 val;

        ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
        ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
        val = cpu_to_le32(expected_data_transfer_len);
        ustorm_st_cxt->exp_data_transfer_len = val;
        SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
        SET_FIELD(ustorm_ag_cxt->flags2,
                  E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
                  tx_dif_conn_err_en ? 1 : 0);
}
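
/* Set the AHS_EXIST flag for initiator tasks that carry AHS, then compute
 * exp_data_acked (unsolicited data for initiator writes, the full transfer
 * length for target reads, the AHS length for initiator reads) or the
 * expected continuation length (task size) for target writes.
 */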
static
void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
                                        struct iscsi_conn_params *conn_params,
                                        enum iscsi_task_type task_type,
                                        u32 task_size,
                                        u32 exp_data_transfer_len,
                                        u8 total_ahs_length)
{
        u32 max_unsolicited_data = 0, val;

        if (total_ahs_length &&
            (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
             task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
                SET_FIELD(context->ustorm_st_context.flags2,
                          USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);

        switch (task_type) {
        case ISCSI_TASK_TYPE_INITIATOR_WRITE:
                if (!conn_params->initial_r2t)
                        max_unsolicited_data = conn_params->first_burst_length;
                else if (conn_params->immediate_data)
                        max_unsolicited_data =
                                min(conn_params->first_burst_length,
                                    conn_params->max_send_pdu_length);

                context->ustorm_ag_context.exp_data_acked =
                        cpu_to_le32(total_ahs_length == 0 ?
                                    min(exp_data_transfer_len,
                                        max_unsolicited_data) :
                                    ((u32)(total_ahs_length +
                                           ISCSI_AHS_CNTL_SIZE)));
                break;
        case ISCSI_TASK_TYPE_TARGET_READ:
                val = cpu_to_le32(exp_data_transfer_len);
                context->ustorm_ag_context.exp_data_acked = val;
                break;
        case ISCSI_TASK_TYPE_INITIATOR_READ:
                context->ustorm_ag_context.exp_data_acked =
                        cpu_to_le32((total_ahs_length == 0 ? 0 :
                                     total_ahs_length +
                                     ISCSI_AHS_CNTL_SIZE));
                break;
        case ISCSI_TASK_TYPE_TARGET_WRITE:
                val = cpu_to_le32(task_size);
                context->ustorm_ag_context.exp_cont_len = val;
                break;
        default:
                break;
        }
}
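
/* Program the RDIF (receive) context for target-write/initiator-read tasks
 * and the TDIF (transmit) context for target-read/initiator-write tasks.
 * Only applies when DIF is enabled on both the host and network interfaces.
 */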
static
void init_rtdif_task_context(struct rdif_task_context *rdif_context,
                             struct tdif_task_context *tdif_context,
                             struct scsi_dif_task_params *dif_task_params,
                             enum iscsi_task_type task_type)
{
        u32 val;

        if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
                return;

        if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
            task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
                rdif_context->app_tag_value =
                        cpu_to_le16(dif_task_params->application_tag);
                rdif_context->partial_crc_value = cpu_to_le16(0xffff);
                val = cpu_to_le32(dif_task_params->initial_ref_tag);
                rdif_context->initial_ref_tag = val;
                rdif_context->app_tag_mask =
                        cpu_to_le16(dif_task_params->application_tag_mask);
                SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
                          dif_task_params->crc_seed ? 1 : 0);
                SET_FIELD(rdif_context->flags0,
                          RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
                          dif_task_params->host_guard_type);
                SET_FIELD(rdif_context->flags0,
                          RDIF_TASK_CONTEXT_PROTECTION_TYPE,
                          dif_task_params->protection_type);
                SET_FIELD(rdif_context->flags0,
                          RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
                SET_FIELD(rdif_context->flags0,
                          RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
                          dif_task_params->keep_ref_tag_const ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
                          (dif_task_params->validate_app_tag &&
                           dif_task_params->dif_on_network) ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_VALIDATE_GUARD,
                          (dif_task_params->validate_guard &&
                           dif_task_params->dif_on_network) ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
                          (dif_task_params->validate_ref_tag &&
                           dif_task_params->dif_on_network) ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_HOST_INTERFACE,
                          dif_task_params->dif_on_host ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
                          dif_task_params->dif_on_network ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_FORWARD_GUARD,
                          dif_task_params->forward_guard ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
                          dif_task_params->forward_app_tag ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
                          dif_task_params->forward_ref_tag ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
                          dif_task_params->forward_app_tag_with_mask ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
                          dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
                          RDIF_TASK_CONTEXT_INTERVAL_SIZE,
                          dif_task_params->dif_block_size_log - 9);
                SET_FIELD(rdif_context->state,
                          RDIF_TASK_CONTEXT_REF_TAG_MASK,
                          dif_task_params->ref_tag_mask);
                SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
                          dif_task_params->ignore_app_tag);
        }

        if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
            task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
                tdif_context->app_tag_value =
                        cpu_to_le16(dif_task_params->application_tag);
                tdif_context->partial_crc_value_b =
                        cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
                tdif_context->partial_crc_value_a =
                        cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
                SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
                          dif_task_params->crc_seed ? 1 : 0);

                SET_FIELD(tdif_context->flags0,
                          TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
                          dif_task_params->tx_dif_conn_err_en ? 1 : 0);
                SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
                          dif_task_params->forward_guard ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
                          TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
                          dif_task_params->forward_app_tag ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
                          TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
                          dif_task_params->forward_ref_tag ? 1 : 0);
                SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
                          dif_task_params->dif_block_size_log - 9);
                SET_FIELD(tdif_context->flags1,
                          TDIF_TASK_CONTEXT_HOST_INTERFACE,
                          dif_task_params->dif_on_host ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
                          TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
                          dif_task_params->dif_on_network ? 1 : 0);
                val = cpu_to_le32(dif_task_params->initial_ref_tag);
                tdif_context->initial_ref_tag = val;
                tdif_context->app_tag_mask =
                        cpu_to_le16(dif_task_params->application_tag_mask);
                SET_FIELD(tdif_context->flags0,
                          TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
                          dif_task_params->host_guard_type);
                SET_FIELD(tdif_context->flags0,
                          TDIF_TASK_CONTEXT_PROTECTION_TYPE,
                          dif_task_params->protection_type);
                SET_FIELD(tdif_context->flags0,
                          TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
                          dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
                SET_FIELD(tdif_context->flags0,
                          TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
                          dif_task_params->keep_ref_tag_const ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
                          TDIF_TASK_CONTEXT_VALIDATE_GUARD,
                          (dif_task_params->validate_guard &&
                           dif_task_params->dif_on_host) ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
                          TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
                          (dif_task_params->validate_app_tag &&
                           dif_task_params->dif_on_host) ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
                          TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
                          (dif_task_params->validate_ref_tag &&
                           dif_task_params->dif_on_host) ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
                          TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
                          dif_task_params->forward_app_tag_with_mask ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
                          TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
                          dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
                          TDIF_TASK_CONTEXT_REF_TAG_MASK,
                          dif_task_params->ref_tag_mask);
                SET_FIELD(tdif_context->flags0,
                          TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
                          dif_task_params->ignore_app_tag ? 1 : 0);
        }
}
static void set_local_completion_context(struct e4_iscsi_task_context *context)
{
        SET_FIELD(context->ystorm_st_context.state.flags,
                  YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
        SET_FIELD(context->ustorm_st_context.flags,
                  USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
}
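
/* Common initialization for read/write tasks: compute the task size, reset
 * the context, fill the Ystorm/Mstorm/Ustorm contexts (SGLs, DIF flags,
 * sense buffer), derive exp_data_acked/continuation length, set up the
 * RDIF/TDIF contexts when DIF is used, and build the SQE.
 */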
static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
                              enum iscsi_task_type task_type,
                              struct iscsi_conn_params *conn_params,
                              struct iscsi_common_hdr *pdu_header,
                              struct scsi_sgl_task_params *sgl_task_params,
                              struct scsi_initiator_cmd_params *cmd_params,
                              struct scsi_dif_task_params *dif_task_params)
{
        u32 exp_data_transfer_len = conn_params->max_burst_length;
        struct e4_iscsi_task_context *cxt;
        bool slow_io = false;
        u32 task_size, val;
        u8 num_sges = 0;

        task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
                                      dif_task_params);

        init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
                                task_type);

        cxt = task_params->context;

        if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
                set_local_completion_context(cxt);
        } else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
                val = cpu_to_le32(task_size +
                          ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
                cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
                cxt->mstorm_st_context.expected_itt =
                        cpu_to_le32(pdu_header->itt);
        } else {
                val = cpu_to_le32(task_size);
                cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
                        val;
                init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
                                                     cmd_params);
                val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
                cxt->mstorm_st_context.sense_db.lo = val;

                val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
                cxt->mstorm_st_context.sense_db.hi = val;
        }

        if (task_params->tx_io_size) {
                init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
                                       dif_task_params);
                init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
                                       dif_task_params);
                init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
                                      &cxt->ystorm_st_context.state.data_desc,
                                      sgl_task_params);

                slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
                                           sgl_task_params->small_mid_sge);

                num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
                                            (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
                                      ISCSI_WQE_NUM_SGES_SLOWIO;

                if (slow_io) {
                        SET_FIELD(cxt->ystorm_st_context.state.flags,
                                  YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
                }
        } else if (task_params->rx_io_size) {
                init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
                                       dif_task_params);
                init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
                                      &cxt->mstorm_st_context.data_desc,
                                      sgl_task_params);
                num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
                                             sgl_task_params->small_mid_sge) ?
                           min_t(u16, sgl_task_params->num_sges,
                                 (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
                           ISCSI_WQE_NUM_SGES_SLOWIO;
                cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
        }

        if (exp_data_transfer_len > task_size ||
            task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
                exp_data_transfer_len = task_size;

        init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
                                  &task_params->context->ustorm_ag_context,
                                  task_size, exp_data_transfer_len, num_sges,
                                  dif_task_params ?
                                  dif_task_params->tx_dif_conn_err_en : false);

        set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
                                           task_type, task_size,
                                           exp_data_transfer_len,
                                           GET_FIELD(pdu_header->hdr_second_dword,
                                                     ISCSI_CMD_HDR_TOTAL_AHS_LEN));

        if (dif_task_params)
                init_rtdif_task_context(&task_params->context->rdif_context,
                                        &task_params->context->tdif_context,
                                        dif_task_params, task_type);

        init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
                 cmd_params, task_type, false);

        return 0;
}
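
/* Entry point for initiator SCSI commands: dispatch to init_rw_iscsi_task()
 * as a write (TX SGL) when the WRITE flag is set, as a read (RX SGL) when
 * the READ flag is set or there is no I/O in either direction; otherwise
 * return -1.
 *
 * Illustrative caller sketch (not part of this file); the field names match
 * the structures used above, everything else (variable names, values) is
 * hypothetical and shown only to indicate how the parameter blocks fit
 * together:
 *
 *      struct iscsi_task_params task = {};
 *      struct scsi_sgl_task_params tx_sgl = {};
 *
 *      task.context = task_ctx;                // per-task firmware context
 *      task.sqe = sqe;                         // SQ element to be built
 *      task.itid = tid;
 *      task.conn_icid = conn_icid;
 *      task.cq_rss_number = cq_idx;
 *      task.tx_io_size = write_len;            // payload size for a WRITE
 *
 *      tx_sgl.sgl = sge_array;                 // DMA-mapped SGE list
 *      tx_sgl.sgl_phys_addr.lo = lower_32_bits(sgl_dma);
 *      tx_sgl.sgl_phys_addr.hi = upper_32_bits(sgl_dma);
 *      tx_sgl.total_buffer_size = write_len;
 *      tx_sgl.num_sges = sge_count;
 *
 *      init_initiator_rw_iscsi_task(&task, &conn_params, &cmd_params,
 *                                   cmd_hdr, &tx_sgl, NULL, NULL);
 */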
int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
                                 struct iscsi_conn_params *conn_params,
                                 struct scsi_initiator_cmd_params *cmd_params,
                                 struct iscsi_cmd_hdr *cmd_header,
                                 struct scsi_sgl_task_params *tx_sgl_params,
                                 struct scsi_sgl_task_params *rx_sgl_params,
                                 struct scsi_dif_task_params *dif_task_params)
{
        if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
                return init_rw_iscsi_task(task_params,
                                          ISCSI_TASK_TYPE_INITIATOR_WRITE,
                                          conn_params,
                                          (struct iscsi_common_hdr *)cmd_header,
                                          tx_sgl_params, cmd_params,
                                          dif_task_params);
        else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ) ||
                 (task_params->rx_io_size == 0 && task_params->tx_io_size == 0))
                return init_rw_iscsi_task(task_params,
                                          ISCSI_TASK_TYPE_INITIATOR_READ,
                                          conn_params,
                                          (struct iscsi_common_hdr *)cmd_header,
                                          rx_sgl_params, cmd_params,
                                          dif_task_params);
        else
                return -1;
}
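
/* Build a middle-path task for a Login Request PDU: program the Ustorm
 * transfer lengths, the TX/RX SGLs and the remaining task size, then build
 * the SQE.
 */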
int init_initiator_login_request_task(struct iscsi_task_params *task_params,
                                      struct iscsi_login_req_hdr *login_header,
                                      struct scsi_sgl_task_params *tx_params,
                                      struct scsi_sgl_task_params *rx_params)
{
        struct e4_iscsi_task_context *cxt;

        cxt = task_params->context;

        init_default_iscsi_task(task_params,
                                (struct data_hdr *)login_header,
                                ISCSI_TASK_TYPE_MIDPATH);

        init_ustorm_task_contexts(&cxt->ustorm_st_context,
                                  &cxt->ustorm_ag_context,
                                  task_params->rx_io_size ?
                                  rx_params->total_buffer_size : 0,
                                  task_params->tx_io_size ?
                                  tx_params->total_buffer_size : 0, 0,
                                  0);

        if (task_params->tx_io_size)
                init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
                                      &cxt->ystorm_st_context.state.data_desc,
                                      tx_params);

        if (task_params->rx_io_size)
                init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
                                      &cxt->mstorm_st_context.data_desc,
                                      rx_params);

        cxt->mstorm_st_context.rem_task_size =
                cpu_to_le32(task_params->rx_io_size ?
                            rx_params->total_buffer_size : 0);

        init_sqe(task_params, tx_params, NULL,
                 (struct iscsi_common_hdr *)login_header, NULL,
                 ISCSI_TASK_TYPE_MIDPATH, false);

        return 0;
}
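
/* Build a middle-path task for a NOP-Out PDU; a NOP-Out carrying the
 * reserved ITT (all ones) is marked for local completion.
 */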
int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
                                struct iscsi_nop_out_hdr *nop_out_pdu_header,
                                struct scsi_sgl_task_params *tx_sgl_task_params,
                                struct scsi_sgl_task_params *rx_sgl_task_params)
{
        struct e4_iscsi_task_context *cxt;

        cxt = task_params->context;

        init_default_iscsi_task(task_params,
                                (struct data_hdr *)nop_out_pdu_header,
                                ISCSI_TASK_TYPE_MIDPATH);

        if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
                set_local_completion_context(task_params->context);

        if (task_params->tx_io_size)
                init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
                                      &cxt->ystorm_st_context.state.data_desc,
                                      tx_sgl_task_params);

        if (task_params->rx_io_size)
                init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
                                      &cxt->mstorm_st_context.data_desc,
                                      rx_sgl_task_params);

        init_ustorm_task_contexts(&cxt->ustorm_st_context,
                                  &cxt->ustorm_ag_context,
                                  task_params->rx_io_size ?
                                  rx_sgl_task_params->total_buffer_size : 0,
                                  task_params->tx_io_size ?
                                  tx_sgl_task_params->total_buffer_size : 0,
                                  0, 0);

        cxt->mstorm_st_context.rem_task_size =
                cpu_to_le32(task_params->rx_io_size ?
                            rx_sgl_task_params->total_buffer_size :
                            0);

        init_sqe(task_params, tx_sgl_task_params, NULL,
                 (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
                 ISCSI_TASK_TYPE_MIDPATH, false);

        return 0;
}
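
/* Build a middle-path task for a Logout Request PDU. */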
int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
                                       struct iscsi_logout_req_hdr *logout_hdr,
                                       struct scsi_sgl_task_params *tx_params,
                                       struct scsi_sgl_task_params *rx_params)
{
        struct e4_iscsi_task_context *cxt;

        cxt = task_params->context;

        init_default_iscsi_task(task_params,
                                (struct data_hdr *)logout_hdr,
                                ISCSI_TASK_TYPE_MIDPATH);

        if (task_params->tx_io_size)
                init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
                                      &cxt->ystorm_st_context.state.data_desc,
                                      tx_params);

        if (task_params->rx_io_size)
                init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
                                      &cxt->mstorm_st_context.data_desc,
                                      rx_params);

        init_ustorm_task_contexts(&cxt->ustorm_st_context,
                                  &cxt->ustorm_ag_context,
                                  task_params->rx_io_size ?
                                  rx_params->total_buffer_size : 0,
                                  task_params->tx_io_size ?
                                  tx_params->total_buffer_size : 0,
                                  0, 0);

        cxt->mstorm_st_context.rem_task_size =
                cpu_to_le32(task_params->rx_io_size ?
                            rx_params->total_buffer_size : 0);

        init_sqe(task_params, tx_params, NULL,
                 (struct iscsi_common_hdr *)logout_hdr, NULL,
                 ISCSI_TASK_TYPE_MIDPATH, false);

        return 0;
}
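
/* Build a middle-path task for a Task Management Function Request PDU; no
 * data SGLs are involved.
 */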
int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
                                    struct iscsi_tmf_request_hdr *tmf_header)
{
        init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
                                ISCSI_TASK_TYPE_MIDPATH);

        init_sqe(task_params, NULL, NULL,
                 (struct iscsi_common_hdr *)tmf_header, NULL,
                 ISCSI_TASK_TYPE_MIDPATH, false);

        return 0;
}
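
/* Build a middle-path task for a Text Request PDU: program the TX/RX SGLs,
 * the remaining task size and the Ustorm transfer lengths, then build the
 * SQE.
 */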
int init_initiator_text_request_task(struct iscsi_task_params *task_params,
                                     struct iscsi_text_request_hdr *text_header,
                                     struct scsi_sgl_task_params *tx_params,
                                     struct scsi_sgl_task_params *rx_params)
{
        struct e4_iscsi_task_context *cxt;

        cxt = task_params->context;

        init_default_iscsi_task(task_params,
                                (struct data_hdr *)text_header,
                                ISCSI_TASK_TYPE_MIDPATH);

        if (task_params->tx_io_size)
                init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
                                      &cxt->ystorm_st_context.state.data_desc,
                                      tx_params);

        if (task_params->rx_io_size)
                init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
                                      &cxt->mstorm_st_context.data_desc,
                                      rx_params);

        cxt->mstorm_st_context.rem_task_size =
                cpu_to_le32(task_params->rx_io_size ?
                            rx_params->total_buffer_size : 0);

        init_ustorm_task_contexts(&cxt->ustorm_st_context,
                                  &cxt->ustorm_ag_context,
                                  task_params->rx_io_size ?
                                  rx_params->total_buffer_size : 0,
                                  task_params->tx_io_size ?
                                  tx_params->total_buffer_size : 0, 0, 0);

        init_sqe(task_params, tx_params, NULL,
                 (struct iscsi_common_hdr *)text_header, NULL,
                 ISCSI_TASK_TYPE_MIDPATH, false);

        return 0;
}
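
/* Post a task-cleanup WQE for the given task. */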
int init_cleanup_task(struct iscsi_task_params *task_params)
{
        init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
                 true);
        return 0;
}