dwc-xlgmac-desc.c

/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"
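
/* Release everything attached to one descriptor data entry: unmap any
 * skb DMA mapping, free the skb, drop references to the Rx header and
 * buffer pages, and clear the saved Tx/Rx state.
 */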
static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata,
				   struct xlgmac_desc_data *desc_data)
{
	if (desc_data->skb_dma) {
		if (desc_data->mapped_as_page) {
			dma_unmap_page(pdata->dev, desc_data->skb_dma,
				       desc_data->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, desc_data->skb_dma,
					 desc_data->skb_dma_len, DMA_TO_DEVICE);
		}
		desc_data->skb_dma = 0;
		desc_data->skb_dma_len = 0;
	}

	if (desc_data->skb) {
		dev_kfree_skb_any(desc_data->skb);
		desc_data->skb = NULL;
	}

	if (desc_data->rx.hdr.pa.pages)
		put_page(desc_data->rx.hdr.pa.pages);

	if (desc_data->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma,
			       desc_data->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(desc_data->rx.hdr.pa_unmap.pages);
	}

	if (desc_data->rx.buf.pa.pages)
		put_page(desc_data->rx.buf.pa.pages);

	if (desc_data->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma,
			       desc_data->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(desc_data->rx.buf.pa_unmap.pages);
	}

	memset(&desc_data->tx, 0, sizeof(desc_data->tx));
	memset(&desc_data->rx, 0, sizeof(desc_data->rx));

	desc_data->mapped_as_page = 0;

	if (desc_data->state_saved) {
		desc_data->state_saved = 0;
		desc_data->state.skb = NULL;
		desc_data->state.len = 0;
		desc_data->state.error = 0;
	}
}
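
/* Tear down one ring: unmap every descriptor data entry, free the
 * descriptor data array, release any cached Rx page allocations and
 * free the coherent DMA descriptor memory.
 */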
static void xlgmac_free_ring(struct xlgmac_pdata *pdata,
			     struct xlgmac_ring *ring)
{
	struct xlgmac_desc_data *desc_data;
	unsigned int i;

	if (!ring)
		return;

	if (ring->desc_data_head) {
		for (i = 0; i < ring->dma_desc_count; i++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, i);
			xlgmac_unmap_desc_data(pdata, desc_data);
		}

		kfree(ring->desc_data_head);
		ring->desc_data_head = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->dma_desc_head) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xlgmac_dma_desc) *
				  ring->dma_desc_count),
				  ring->dma_desc_head,
				  ring->dma_desc_head_addr);
		ring->dma_desc_head = NULL;
	}
}
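
/* Allocate the coherent DMA descriptors and the per-descriptor data
 * array for one ring. Partial allocations are cleaned up by the caller
 * through xlgmac_free_rings().
 */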
static int xlgmac_init_ring(struct xlgmac_pdata *pdata,
			    struct xlgmac_ring *ring,
			    unsigned int dma_desc_count)
{
	if (!ring)
		return 0;

	/* Descriptors */
	ring->dma_desc_count = dma_desc_count;
	ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
						 (sizeof(struct xlgmac_dma_desc) *
						 dma_desc_count),
						 &ring->dma_desc_head_addr,
						 GFP_KERNEL);
	if (!ring->dma_desc_head)
		return -ENOMEM;

	/* Array of descriptor data */
	ring->desc_data_head = kcalloc(dma_desc_count,
				       sizeof(struct xlgmac_desc_data),
				       GFP_KERNEL);
	if (!ring->desc_data_head)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n",
		  ring->dma_desc_head,
		  &ring->dma_desc_head_addr,
		  ring->desc_data_head);

	return 0;
}
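
/* Free the Tx and Rx rings of every channel. */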
static void xlgmac_free_rings(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (!pdata->channel_head)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		xlgmac_free_ring(pdata, channel->tx_ring);
		xlgmac_free_ring(pdata, channel->rx_ring);
	}
}
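
/* Initialize the Tx and Rx rings of every channel; on failure, undo
 * everything allocated so far.
 */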
static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xlgmac_init_ring(pdata, channel->tx_ring,
				       pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_init_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xlgmac_init_ring(pdata, channel->rx_ring,
				       pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_init_ring;
		}
	}

	return 0;

err_init_ring:
	xlgmac_free_rings(pdata);

	return ret;
}
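
/* Free the ring arrays and the channel array. The Tx and Rx ring
 * arrays are shared allocations hanging off the first channel.
 */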
static void xlgmac_free_channels(struct xlgmac_pdata *pdata)
{
	if (!pdata->channel_head)
		return;

	kfree(pdata->channel_head->tx_ring);
	pdata->channel_head->tx_ring = NULL;

	kfree(pdata->channel_head->rx_ring);
	pdata->channel_head->rx_ring = NULL;

	kfree(pdata->channel_head);
	pdata->channel_head = NULL;
	pdata->channel_count = 0;
}
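
/* Allocate the channel array plus one Tx and one Rx ring array, then
 * wire up each channel's DMA register block, per-channel IRQ (when
 * used) and its share of the ring arrays.
 */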
static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel_head, *channel;
	struct xlgmac_ring *tx_ring, *rx_ring;
	int ret = -ENOMEM;
	unsigned int i;

	channel_head = kcalloc(pdata->channel_count,
			       sizeof(struct xlgmac_channel), GFP_KERNEL);
	if (!channel_head)
		return ret;

	netif_dbg(pdata, drv, pdata->netdev,
		  "channel_head=%p\n", channel_head);

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_head; i < pdata->channel_count;
		i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->mac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq) {
			/* Get the per DMA interrupt */
			ret = pdata->channel_irq[i];
			if (ret < 0) {
				netdev_err(pdata->netdev,
					   "get_irq %u failed\n",
					   i + 1);
				goto err_irq;
			}

			channel->dma_irq = ret;
		}

		/* Assign rings by index so tx_ring/rx_ring keep pointing at
		 * the array bases and can safely be freed on the error
		 * paths below.
		 */
		if (i < pdata->tx_ring_count)
			channel->tx_ring = &tx_ring[i];

		if (i < pdata->rx_ring_count)
			channel->rx_ring = &rx_ring[i];

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n",
			  channel->name, channel->dma_regs,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel_head = channel_head;

	return 0;

err_irq:
	kfree(rx_ring);

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_head);

	return ret;
}
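
/* Top-level teardown: rings first, then the channels they belong to. */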
static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata)
{
	xlgmac_free_rings(pdata);

	xlgmac_free_channels(pdata);
}
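
/* Top-level setup: allocate the channels, then their rings; on any
 * failure both are torn down again.
 */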
static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata)
{
	int ret;

	ret = xlgmac_alloc_channels(pdata);
	if (ret)
		goto err_alloc;

	ret = xlgmac_alloc_rings(pdata);
	if (ret)
		goto err_alloc;

	return 0;

err_alloc:
	xlgmac_free_channels_and_rings(pdata);

	return ret;
}
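
/* Allocate a compound page block for Rx buffers, falling back to
 * smaller orders under memory pressure, and DMA-map it for the device.
 */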
static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
			      struct xlgmac_page_alloc *pa,
			      gfp_t gfp, int order)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;

	/* Try to obtain pages, decreasing order if necessary */
	gfp |= __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages(gfp, order);
		if (pages)
			break;

		order--;
	}
	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}
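
/* Hand a 'len'-byte slice of the shared page allocation to one buffer
 * descriptor. When no further slice of that size would fit, this
 * descriptor takes over unmap responsibility via pa_unmap and a fresh
 * allocation is requested next time.
 */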
static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd,
				   struct xlgmac_page_alloc *pa,
				   unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}
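
/* Make sure the ring's header and data page pools exist, then carve a
 * header slice and a data slice out of them for one Rx descriptor.
 */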
static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata,
				struct xlgmac_ring *ring,
				struct xlgmac_desc_data *desc_data)
{
	int order, ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
					 GFP_ATOMIC, 0);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
		ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
					 GFP_ATOMIC, order);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
			       XLGMAC_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
			       pdata->rx_buf_size);

	return 0;
}
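
/* Point each Tx descriptor data entry at its hardware descriptor,
 * reset the ring indices and let the hardware ops program the channel.
 */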
static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	dma_addr_t dma_desc_addr;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		dma_desc = ring->dma_desc_head;
		dma_desc_addr = ring->dma_desc_head_addr;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);

			desc_data->dma_desc = dma_desc;
			desc_data->dma_desc_addr = dma_desc_addr;

			dma_desc++;
			dma_desc_addr += sizeof(struct xlgmac_dma_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_ops->tx_desc_init(channel);
	}
}
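
/* Same as the Tx variant, but each Rx descriptor also gets header and
 * data buffers mapped before the hardware ops program the channel.
 */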
static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	dma_addr_t dma_desc_addr;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		dma_desc = ring->dma_desc_head;
		dma_desc_addr = ring->dma_desc_head_addr;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);

			desc_data->dma_desc = dma_desc;
			desc_data->dma_desc_addr = dma_desc_addr;

			if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
				break;

			dma_desc++;
			dma_desc_addr += sizeof(struct xlgmac_dma_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_ops->rx_desc_init(channel);
	}
}
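
/* DMA-map an skb for transmission: an optional TSO header mapping,
 * then the linear data and every page fragment, split into chunks of
 * at most XLGMAC_TX_MAX_BUF_SIZE, one ring descriptor per chunk.
 * Returns the number of descriptors used, or 0 on mapping failure
 * (with everything mapped so far unwound).
 */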
static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
			     struct sk_buff *skb)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	unsigned int start_index, cur_index;
	struct xlgmac_desc_data *desc_data;
	unsigned int offset, datalen, len;
	struct xlgmac_pkt_info *pkt_info;
	struct skb_frag_struct *frag;
	unsigned int tso, vlan;
	dma_addr_t skb_dma;
	unsigned int i;

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	pkt_info = &ring->pkt_info;
	pkt_info->desc_count = 0;
	pkt_info->length = 0;

	tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
				  TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				  TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
	vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
				   TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
				   TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);

	/* Save space for a context descriptor if needed */
	if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
	    (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 pkt_info->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		desc_data->skb_dma = skb_dma;
		desc_data->skb_dma_len = pkt_info->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, pkt_info->header_len);

		offset = pkt_info->header_len;

		pkt_info->length += pkt_info->header_len;

		cur_index++;
		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		desc_data->skb_dma = skb_dma;
		desc_data->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		pkt_info->length += len;

		cur_index++;
		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XLGMAC_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			desc_data->skb_dma = skb_dma;
			desc_data->skb_dma_len = len;
			desc_data->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			pkt_info->length += len;

			cur_index++;
			desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so desc_data is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
	desc_data->skb = skb;

	/* Save the number of descriptor entries used */
	pkt_info->desc_count = cur_index - start_index;

	return pkt_info->desc_count;

err_out:
	while (start_index < cur_index) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
		xlgmac_unmap_desc_data(pdata, desc_data);
	}

	return 0;
}
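
/* Publish the descriptor operations used by the rest of the driver. */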
void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
{
	/* "channles" is kept as-is; it matches the member name declared in
	 * dwc-xlgmac.h.
	 */
	desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings;
	desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
	desc_ops->map_tx_skb = xlgmac_map_tx_skb;
	desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
	desc_ops->unmap_desc_data = xlgmac_unmap_desc_data;
	desc_ops->tx_desc_init = xlgmac_tx_desc_init;
	desc_ops->rx_desc_init = xlgmac_rx_desc_init;
}