ena_eth_com.c

/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_eth_com.h"
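
/* Return the next unprocessed RX completion descriptor, or NULL when the
 * descriptor at the current head does not yet carry the expected phase
 * bit (i.e. the device has not written it yet).
 */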
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) &
		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}
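
/* Return a pointer to the host-memory SQ descriptor at the current tail. */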
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
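
/* Copy a completed bounce buffer (one LLQ descriptor list entry) to the
 * device's push memory and advance the SQ tail. Fails with -ENOSPC when
 * the allowed TX burst size has been exhausted.
 */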
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			pr_err("Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
			 io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}
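
/* Copy the packet header into the current bounce buffer, right after the
 * descriptors that precede it. No-op for host-memory (non-LLQ) queues.
 */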
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		pr_err("trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		pr_err("bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}
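
/* Return the next free descriptor slot inside the current bounce buffer
 * and account for it in the packet control state.
 */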
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		pr_err("bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}
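
/* Flush a partially filled bounce buffer to the device (if anything was
 * written to it) and reset the packet control state for the next packet.
 * No-op for host-memory (non-LLQ) queues.
 */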
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;

	return 0;
}
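
/* Dispatch to the LLQ or host-memory descriptor getter based on the
 * queue's placement policy.
 */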
static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}
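
/* Advance the LLQ tail: when the current descriptor line is full, write
 * it to the device and start a fresh bounce buffer line.
 */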
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl ==
			     ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}
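
/* Advance the SQ tail, toggling the phase bit on wrap-around for
 * host-memory queues and delegating to the LLQ variant otherwise.
 */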
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}
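
/* Translate a (possibly unmasked) completion descriptor index into a
 * pointer within the CQ descriptor ring.
 */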
static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
			idx * io_cq->cdesc_entry_size_in_bytes);
}
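
/* Consume completion descriptors until one with the LAST bit is seen.
 * Returns the number of descriptors that make up the completed packet
 * (0 if the packet has not been fully received yet) and, through
 * first_cdesc_idx, the index of its first descriptor.
 */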
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) &
			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			 io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}
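
/* Build a TX meta descriptor (TSO/checksum offload parameters) at the
 * current tail and cache the written meta so that subsequent packets with
 * identical offload parameters can skip it.
 */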
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	/* get_sq_desc() can return NULL on the LLQ path */
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	/* Cache the meta desc */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	return ena_com_sq_update_tail(io_sq);
}
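
/* Decode the protocol, checksum, hash and fragmentation flags of a
 * received packet from its last completion descriptor.
 */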
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash 0x%x frag: %d cdesc_status: %x\n",
		 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************         API        ***************************/
/*****************************************************************************/
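
/* Prepare the TX descriptors for a packet: an optional meta descriptor,
 * the pushed header (LLQ), then one buffer descriptor per scatter-gather
 * element. On success, *nb_hw_desc holds the number of HW descriptors used.
 */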
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs + 1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		pr_debug("Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		pr_err("header size is too large %d max header: %d\n",
		       header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push))
		return -EINVAL;

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta) {
		rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
		if (unlikely(rc))
			return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same slot as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc))
				return rc;

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc))
		return rc;

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}
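
/* Fetch one received packet from the CQ: collect its completion
 * descriptors, record length/req_id per buffer into ena_rx_ctx, advance
 * the matching SQ's next_to_comp and decode the RX flags from the last
 * completion descriptor.
 */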
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		 nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
		       ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;
	pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
		 io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}
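
/* Post a single RX buffer descriptor to the RX SQ so the device can DMA
 * an incoming frame into it.
 */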
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}
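
/* Return true when no new completion descriptor is pending in the CQ. */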
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}