octeon_droq.c

/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"

#define CVM_MIN(d1, d2)	(((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2)	(((d1) > (d2)) ? (d1) : (d2))

struct niclist {
	struct list_head list;
	void *ptr;
};

struct __dispatch {
	struct list_head list;
	struct octeon_recv_info *rinfo;
	octeon_dispatch_fn_t disp_fn;
};

/** Get the argument that the user set when registering dispatch
 * function for a given opcode/subcode.
 * @param octeon_dev - the octeon device pointer.
 * @param opcode     - the opcode for which the dispatch argument
 *                     is to be checked.
 * @param subcode    - the subcode for which the dispatch argument
 *                     is to be checked.
 * @return Success: void * (argument to the dispatch function)
 * @return Failure: NULL
 */
static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
					    u16 opcode, u16 subcode)
{
	int idx;
	struct list_head *dispatch;
	void *fn_arg = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn_arg = ((struct octeon_dispatch *)
					  dispatch)->arg;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn_arg;
}

/** Check for packets on Droq. This function should be called with lock held.
 * @param  droq - Droq on which count is checked.
 * @return Returns packet count.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
	u32 pkt_count = 0;
	u32 last_count;

	pkt_count = readl(droq->pkts_sent_reg);

	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	/* we shall write to cnts at napi irq enable or end of droq tasklet */
	if (last_count)
		atomic_add(last_count, &droq->pkts_pending);

	return last_count;
}

static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
	u32 count = 0;

	/* max_empty_descs is the max. no. of descs that can have no buffers.
	 * If the empty desc count goes beyond this value, we cannot safely
	 * read in a 64K packet sent by Octeon
	 * (64K is max pkt size from Octeon)
	 */
	droq->max_empty_descs = 0;

	do {
		droq->max_empty_descs++;
		count += droq->buffer_size;
	} while (count < (64 * 1024));

	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}

static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	atomic_set(&droq->pkts_pending, 0);
}

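/* Unmap and free every receive buffer and info pointer mapping in the
 * ring, then reset the ring indices. Used when tearing down a DROQ.
 */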
static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
				 struct octeon_droq *droq)
{
	u32 i;
	struct octeon_skb_page_info *pg_info;

	for (i = 0; i < droq->max_count; i++) {
		pg_info = &droq->recv_buf_list[i].pg_info;

		if (pg_info->dma)
			lio_unmap_ring(oct->pci_dev,
				       (u64)pg_info->dma);
		pg_info->dma = 0;

		if (pg_info->page)
			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
					    pg_info);

		if (droq->desc_ring && droq->desc_ring[i].info_ptr)
			lio_unmap_ring_info(oct->pci_dev,
					    (u64)droq->desc_ring[i].info_ptr,
					    OCT_DROQ_INFO_SIZE);
		droq->recv_buf_list[i].buffer = NULL;
	}

	octeon_droq_reset_indices(droq);
}

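/* Allocate a receive buffer for every descriptor in the ring and map the
 * buffer and info addresses into the descriptors. Returns 0 on success,
 * -ENOMEM if a buffer allocation fails.
 */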
static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
			       struct octeon_droq *droq)
{
	u32 i;
	void *buf;
	struct octeon_droq_desc *desc_ring = droq->desc_ring;

	for (i = 0; i < droq->max_count; i++) {
		buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

		if (!buf) {
			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
				__func__);
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->recv_buf_list[i].data = get_rbd(buf);
		droq->info_list[i].length = 0;

		/* map ring buffers into memory */
		desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	octeon_droq_reset_indices(droq);

	octeon_droq_compute_max_packet_bufs(droq);

	return 0;
}

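/* Release all resources held by output queue q_no: the ring buffers, the
 * info list and the descriptor ring, then clear the droq structure.
 */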
int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
	struct octeon_droq *droq = oct->droq[q_no];

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	octeon_droq_destroy_ring_buffers(oct, droq);
	vfree(droq->recv_buf_list);

	if (droq->info_base_addr)
		cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
				       droq->info_alloc_size,
				       droq->info_base_addr,
				       droq->info_list_dma);

	if (droq->desc_ring)
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);

	memset(droq, 0, OCT_DROQ_SIZE);

	return 0;
}

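/* Initialize output queue q_no: allocate the descriptor ring (preferring
 * the local NUMA node), the info list and the receive buffer list,
 * populate the ring with buffers and program the per-queue registers.
 * Returns 0 on success, 1 on failure.
 */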
int octeon_init_droq(struct octeon_device *oct,
		     u32 q_no,
		     u32 num_descs,
		     u32 desc_size,
		     void *app_ctx)
{
	struct octeon_droq *droq;
	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(q_no % num_online_cpus());

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	droq = oct->droq[q_no];
	memset(droq, 0, OCT_DROQ_SIZE);

	droq->oct_dev = oct;
	droq->q_no = q_no;
	if (app_ctx)
		droq->app_ctx = app_ctx;
	else
		droq->app_ctx = (void *)(size_t)q_no;

	c_num_descs = num_descs;
	c_buf_size = desc_size;
	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
		c_refill_threshold =
			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
	} else if (OCTEON_CN23XX_PF(oct)) {
		struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else {
		return 1;
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
	set_dev_node(&oct->pci_dev->dev, numa_node);
	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!droq->desc_ring)
		droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);

	if (!droq->desc_ring) {
		dev_err(&oct->pci_dev->dev,
			"Output queue %d ring alloc failed\n", q_no);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		q_no, droq->desc_ring, droq->desc_ring_dma);
	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
		droq->max_count);

	droq->info_list =
		cnnic_numa_alloc_aligned_dma((droq->max_count *
					      OCT_DROQ_INFO_SIZE),
					     &droq->info_alloc_size,
					     &droq->info_base_addr,
					     numa_node);
	if (!droq->info_list) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);
		return 1;
	}

	droq->recv_buf_list = (struct octeon_recv_buffer *)
			      vmalloc_node(droq->max_count *
						OCT_DROQ_RECVBUF_SIZE,
						numa_node);
	if (!droq->recv_buf_list)
		droq->recv_buf_list = (struct octeon_recv_buffer *)
				      vmalloc(droq->max_count *
						OCT_DROQ_RECVBUF_SIZE);
	if (!droq->recv_buf_list) {
		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (octeon_droq_setup_ring_buffers(oct, droq))
		goto init_droq_fail;

	droq->pkts_per_intr = c_pkts_per_intr;
	droq->refill_threshold = c_refill_threshold;

	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
		droq->max_empty_descs);

	spin_lock_init(&droq->lock);

	INIT_LIST_HEAD(&droq->dispatch_list);

	/* For 56xx Pass1, this function won't be called, so no checks. */
	oct->fn_list.setup_oq_regs(oct, q_no);

	oct->io_qmask.oq |= (1ULL << q_no);

	return 0;

init_droq_fail:
	octeon_delete_droq(oct, q_no);
	return 1;
}

/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - no. of buffers used by the packet.
 *  idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *  Allocates a recv_info_t and copies the buffer addresses for packet data
 *  into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *  Flags the descriptors for refill later. If available descriptors go
 *  below the threshold to receive a 64K pkt, new buffers are first allocated
 *  before the recv_pkt_t is created.
 *  This routine will be called in interrupt context.
 * Returns:
 *  Success: Pointer to recv_info_t
 *  Failure: NULL.
 * Locks:
 *  The droq->lock is held when this routine is called.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
		struct octeon_device *octeon_dev,
		struct octeon_droq *droq,
		u32 buf_cnt,
		u32 idx)
{
	struct octeon_droq_info *info;
	struct octeon_recv_pkt *recv_pkt;
	struct octeon_recv_info *recv_info;
	u32 i, bytes_left;
	struct octeon_skb_page_info *pg_info;

	info = &droq->info_list[idx];

	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
	if (!recv_info)
		return NULL;

	recv_pkt = recv_info->recv_pkt;
	recv_pkt->rh = info->rh;
	recv_pkt->length = (u32)info->length;
	recv_pkt->buffer_count = (u16)buf_cnt;
	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

	i = 0;
	bytes_left = (u32)info->length;

	while (buf_cnt) {
		{
			pg_info = &droq->recv_buf_list[idx].pg_info;

			lio_unmap_ring(octeon_dev->pci_dev,
				       (u64)pg_info->dma);
			pg_info->page = NULL;
			pg_info->dma = 0;
		}

		recv_pkt->buffer_size[i] =
			(bytes_left >=
			 droq->buffer_size) ? droq->buffer_size : bytes_left;

		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;

		INCR_INDEX_BY1(idx, droq->max_count);
		bytes_left -= droq->buffer_size;
		i++;
		buf_cnt--;
	}

	return recv_info;
}

/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
				struct octeon_droq_desc *desc_ring)
{
	u32 desc_refilled = 0;
	u32 refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
				droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;

			do {
				INCR_INDEX_BY1(droq->refill_idx,
					       droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].
				 buffer);
		}
		INCR_INDEX_BY1(refill_index, droq->max_count);
	}                       /* while */

	return desc_refilled;
}

/* octeon_droq_refill
 * Parameters:
 *  droq       - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  No of descriptors refilled.
 * Locks:
 *  This routine is called with droq->lock held.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
	struct octeon_droq_desc *desc_ring;
	void *buf = NULL;
	u8 *data;
	u32 desc_refilled = 0;
	struct octeon_skb_page_info *pg_info;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
			pg_info =
				&droq->recv_buf_list[droq->refill_idx].pg_info;
			/* Either recycle the existing pages or go for
			 * new page alloc
			 */
			if (pg_info->page)
				buf = recv_buffer_reuse(octeon_dev, pg_info);
			else
				buf = recv_buffer_alloc(octeon_dev, pg_info);
			/* If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (!buf) {
				droq->stats.rx_alloc_failure++;
				break;
			}
			droq->recv_buf_list[droq->refill_idx].buffer = buf;
			data = get_rbd(buf);
		} else {
			data = get_rbd(droq->recv_buf_list
				       [droq->refill_idx].buffer);
		}

		droq->recv_buf_list[droq->refill_idx].data = data;

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[droq->
				     refill_idx].buffer);
		/* Reset any previous values in the length field. */
		droq->info_list[droq->refill_idx].length = 0;

		INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled +=
			octeon_droq_refill_pullup_descs(droq, desc_ring);

	/* if droq->refill_count
	 * The refill count would not change in pass two. We only moved buffers
	 * to close the gap in the ring, but we would still have the same no. of
	 * buffers to refill.
	 */
	return desc_refilled;
}

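/* Number of receive buffers (each of buf_size bytes) needed to hold a
 * packet of total_len bytes.
 */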
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
	u32 buf_cnt = 0;

	while (total_len > (buf_size * buf_cnt))
		buf_cnt++;
	return buf_cnt;
}

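/* Queue a slow-path packet for dispatch. Looks up the dispatch function
 * registered for the opcode/subcode in the receive header, builds a
 * recv_info for the packet's buffers and adds it to droq->dispatch_list.
 * Returns the number of descriptors consumed by the packet.
 */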
static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
			 struct octeon_droq *droq,
			 union octeon_rh *rh,
			 struct octeon_droq_info *info)
{
	u32 cnt;
	octeon_dispatch_fn_t disp_fn;
	struct octeon_recv_info *rinfo;

	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
				      (u16)rh->r.subcode);
	if (disp_fn) {
		rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
		if (rinfo) {
			struct __dispatch *rdisp = rinfo->rsvd;

			rdisp->rinfo = rinfo;
			rdisp->disp_fn = disp_fn;
			rinfo->recv_pkt->rh = *rh;
			list_add_tail(&rdisp->list,
				      &droq->dispatch_list);
		} else {
			droq->stats.dropped_nomem++;
		}
	} else {
		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
			(unsigned int)rh->r.opcode,
			(unsigned int)rh->r.subcode);
		droq->stats.dropped_nodispatch++;
	}

	return cnt;
}

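/* Drop cnt packets from the DROQ without passing them up: advance the
 * read index past each packet's buffers and mark those descriptors for
 * refill.
 */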
static inline void octeon_droq_drop_packets(struct octeon_device *oct,
					    struct octeon_droq *droq,
					    u32 cnt)
{
	u32 i = 0, buf_cnt;
	struct octeon_droq_info *info;

	for (i = 0; i < cnt; i++) {
		info = &droq->info_list[droq->read_idx];
		octeon_swap_8B_data((u64 *)info, 2);

		if (info->length) {
			info->length -= OCT_RH_SIZE;
			droq->stats.bytes_received += info->length;
			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
							   (u32)info->length);
		} else {
			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
			buf_cnt = 1;
		}

		INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
		droq->refill_count += buf_cnt;
	}
}

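/* Fast-path receive processing. For each pending packet, either queue it
 * for slow-path dispatch or build an skb and hand it to droq->ops.fptr.
 * Refills descriptors and posts credits once refill_count crosses the
 * refill threshold. Called with droq->lock held. Returns the number of
 * packets processed.
 */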
static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
				 struct octeon_droq *droq,
				 u32 pkts_to_process)
{
	struct octeon_droq_info *info;
	union octeon_rh *rh;
	u32 pkt, total_len = 0, pkt_count;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		u32 pkt_len = 0;
		struct sk_buff *nicbuf = NULL;
		struct octeon_skb_page_info *pg_info;
		void *buf;

		info = &droq->info_list[droq->read_idx];
		octeon_swap_8B_data((u64 *)info, 2);

		if (!info->length) {
			dev_err(&oct->pci_dev->dev,
				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				droq->q_no, droq->read_idx, pkt_count);
			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
					     (u8 *)info,
					     OCT_DROQ_INFO_SIZE);
			break;
		}

		/* Len of resp hdr is included in the received data len. */
		info->length -= OCT_RH_SIZE;
		rh = &info->rh;

		total_len += (u32)info->length;

		if (OPCODE_SLOW_PATH(rh)) {
			u32 buf_cnt;

			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
			INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				pkt_len = (u32)info->length;
				nicbuf = droq->recv_buf_list[
					droq->read_idx].buffer;
				pg_info = &droq->recv_buf_list[
					droq->read_idx].pg_info;
				if (recv_buffer_recycle(oct, pg_info))
					pg_info->page = NULL;
				droq->recv_buf_list[droq->read_idx].buffer =
					NULL;

				INCR_INDEX_BY1(droq->read_idx, droq->max_count);
				droq->refill_count++;
			} else {
				nicbuf = octeon_fast_packet_alloc((u32)
								  info->length);
				pkt_len = 0;
				/* nicbuf allocation can fail. We'll handle it
				 * inside the loop.
				 */
				while (pkt_len < info->length) {
					int cpy_len, idx = droq->read_idx;

					cpy_len = ((pkt_len + droq->buffer_size)
						   > info->length) ?
						((u32)info->length - pkt_len) :
						droq->buffer_size;

					if (nicbuf) {
						octeon_fast_packet_next(droq,
									nicbuf,
									cpy_len,
									idx);
						buf = droq->recv_buf_list[idx].
							buffer;
						recv_buffer_fast_free(buf);
						droq->recv_buf_list[idx].buffer
							= NULL;
					} else {
						droq->stats.rx_alloc_failure++;
					}

					pkt_len += cpy_len;
					INCR_INDEX_BY1(droq->read_idx,
						       droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf) {
				if (droq->ops.fptr) {
					droq->ops.fptr(oct->octeon_id,
						       nicbuf, pkt_len,
						       rh, &droq->napi,
						       droq->ops.farg);
				} else {
					recv_buffer_free(nicbuf);
				}
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = octeon_droq_refill(oct, droq);

			/* Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			writel((desc_refilled), droq->pkts_credit_reg);
			/* make sure mmio write completes */
			mmiowb();
		}
	}                       /* for (each packet)... */

	/* Update stats for the packets and bytes processed. */
	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		return pkts_to_process;
	}

	return pkt;
}

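/* Process up to budget packets on a DROQ under droq->lock, then run any
 * slow-path dispatch handlers that were queued. Returns 1 if packets are
 * still pending (caller should reschedule), 0 otherwise.
 */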
int
octeon_droq_process_packets(struct octeon_device *oct,
			    struct octeon_droq *droq,
			    u32 budget)
{
	u32 pkt_count = 0, pkts_processed = 0;
	struct list_head *tmp, *tmp2;

	/* Grab the droq lock */
	spin_lock(&droq->lock);

	octeon_droq_check_hw_for_pkts(droq);
	pkt_count = atomic_read(&droq->pkts_pending);

	if (!pkt_count) {
		spin_unlock(&droq->lock);
		return 0;
	}

	if (pkt_count > budget)
		pkt_count = budget;

	pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);

	atomic_sub(pkts_processed, &droq->pkts_pending);

	/* Release the spin lock */
	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	/* If there are packets pending, schedule tasklet again */
	if (atomic_read(&droq->pkts_pending))
		return 1;

	return 0;
}

/**
 * Utility function to poll for packets. check_hw_for_packets must be
 * called before calling this routine.
 */
static int
octeon_droq_process_poll_pkts(struct octeon_device *oct,
			      struct octeon_droq *droq, u32 budget)
{
	struct list_head *tmp, *tmp2;
	u32 pkts_available = 0, pkts_processed = 0;
	u32 total_pkts_processed = 0;

	if (budget > droq->max_count)
		budget = droq->max_count;

	spin_lock(&droq->lock);

	while (total_pkts_processed < budget) {
		octeon_droq_check_hw_for_pkts(droq);

		pkts_available =
			CVM_MIN((budget - total_pkts_processed),
				(u32)(atomic_read(&droq->pkts_pending)));

		if (pkts_available == 0)
			break;

		pkts_processed =
			octeon_droq_fast_process_packets(oct, droq,
							 pkts_available);

		atomic_sub(pkts_processed, &droq->pkts_pending);

		total_pkts_processed += pkts_processed;
	}

	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	return total_pkts_processed;
}

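/* Handle a poll command for output queue q_no: process packets
 * (POLL_EVENT_PROCESS_PKTS / POLL_EVENT_PENDING_PKTS) or re-enable the
 * packet interrupts for the queue (POLL_EVENT_ENABLE_INTR).
 */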
int
octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
			     u32 arg)
{
	struct octeon_droq *droq;

	droq = oct->droq[q_no];

	if (cmd == POLL_EVENT_PROCESS_PKTS)
		return octeon_droq_process_poll_pkts(oct, droq, arg);

	if (cmd == POLL_EVENT_PENDING_PKTS) {
		u32 pkt_cnt = atomic_read(&droq->pkts_pending);

		return octeon_droq_process_packets(oct, droq, pkt_cnt);
	}

	if (cmd == POLL_EVENT_ENABLE_INTR) {
		u32 value;
		unsigned long flags;

		/* Enable Pkt Interrupt */
		switch (oct->chip_id) {
		case OCTEON_CN66XX:
		case OCTEON_CN68XX: {
			struct octeon_cn6xxx *cn6xxx =
				(struct octeon_cn6xxx *)oct->chip;
			spin_lock_irqsave
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
			value =
				octeon_read_csr(oct,
						CN6XXX_SLI_PKT_TIME_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_TIME_INT_ENB,
					 value);
			value =
				octeon_read_csr(oct,
						CN6XXX_SLI_PKT_CNT_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_CNT_INT_ENB,
					 value);

			/* don't bother flushing the enables */

			spin_unlock_irqrestore
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
			return 0;
		}
		break;
		case OCTEON_CN23XX_PF_VID: {
			lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
		}
		break;
		}
		return 0;
	}

	dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
	return -EINVAL;
}

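/* Register the droq_ops (receive callback, callback argument and drop
 * policy) for output queue q_no. The ops structure is copied under
 * droq->lock.
 */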
int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
			     struct octeon_droq_ops *ops)
{
	struct octeon_droq *droq;
	unsigned long flags;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (!(ops)) {
		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (oct->num_oqs - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	spin_lock_irqsave(&droq->lock, flags);

	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}

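/* Clear the receive callback, callback argument and drop policy that were
 * previously registered for output queue q_no.
 */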
int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
	unsigned long flags;
	struct octeon_droq *droq;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, oct->num_oqs - 1);
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (!droq) {
		dev_info(&oct->pci_dev->dev,
			 "Droq id (%d) not available.\n", q_no);
		return 0;
	}

	spin_lock_irqsave(&droq->lock, flags);

	droq->ops.fptr = NULL;
	droq->ops.farg = NULL;
	droq->ops.drop_on_max = 0;

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}

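/* Allocate and initialize output queue q_no: allocates the droq structure
 * (preferring the local NUMA node), disables packet output for the queue,
 * and calls octeon_init_droq() to set up descriptors and buffers.
 */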
int octeon_create_droq(struct octeon_device *oct,
		       u32 q_no, u32 num_descs,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;
	int numa_node = cpu_to_node(q_no % num_online_cpus());

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
			q_no);
		return 1;
	}

	/* Allocate the DS for the new droq. */
	droq = vmalloc_node(sizeof(*droq), numa_node);
	if (!droq)
		droq = vmalloc(sizeof(*droq));
	if (!droq)
		return -ENOMEM;

	memset(droq, 0, sizeof(struct octeon_droq));

	/* Disable the pkt o/p for this Q  */
	octeon_set_droq_pkt_op(oct, q_no, 0);
	oct->droq[q_no] = droq;

	/* Initialize the Droq. On failure it frees its own allocations, so
	 * only the droq structure itself needs to be released here.
	 */
	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
		vfree(oct->droq[q_no]);
		oct->droq[q_no] = NULL;
		return -ENOMEM;
	}

	oct->num_oqs++;

	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
		oct->num_oqs);

	/* Global Droq register settings */

	/* As of now not required, as setting are done for all 32 Droqs at
	 * the same time.
	 */
	return 0;
}