xgbe-desc.c

/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
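
/* Release everything a single ring owns: the per-descriptor state (unmapping
 * any DMA buffers still attached to it), the shared Rx header/buffer page
 * allocations, and the coherent descriptor memory itself.
 */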
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
                           struct xgbe_ring *ring)
{
        struct xgbe_ring_data *rdata;
        unsigned int i;

        if (!ring)
                return;

        if (ring->rdata) {
                for (i = 0; i < ring->rdesc_count; i++) {
                        rdata = XGBE_GET_DESC_DATA(ring, i);
                        xgbe_unmap_rdata(pdata, rdata);
                }

                kfree(ring->rdata);
                ring->rdata = NULL;
        }

        if (ring->rx_hdr_pa.pages) {
                dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
                               ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
                put_page(ring->rx_hdr_pa.pages);

                ring->rx_hdr_pa.pages = NULL;
                ring->rx_hdr_pa.pages_len = 0;
                ring->rx_hdr_pa.pages_offset = 0;
                ring->rx_hdr_pa.pages_dma = 0;
        }

        if (ring->rx_buf_pa.pages) {
                dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
                               ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
                put_page(ring->rx_buf_pa.pages);

                ring->rx_buf_pa.pages = NULL;
                ring->rx_buf_pa.pages_len = 0;
                ring->rx_buf_pa.pages_offset = 0;
                ring->rx_buf_pa.pages_dma = 0;
        }

        if (ring->rdesc) {
                dma_free_coherent(pdata->dev,
                                  (sizeof(struct xgbe_ring_desc) *
                                   ring->rdesc_count),
                                  ring->rdesc, ring->rdesc_dma);
                ring->rdesc = NULL;
        }
}
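
/* Free the Tx and Rx rings of every channel. */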
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        DBGPR("-->xgbe_free_ring_resources\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                xgbe_free_ring(pdata, channel->tx_ring);
                xgbe_free_ring(pdata, channel->rx_ring);
        }

        DBGPR("<--xgbe_free_ring_resources\n");
}
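
/* Allocate the coherent descriptor memory and the per-descriptor tracking
 * array for one ring.  On failure the caller is expected to clean up via
 * xgbe_free_ring_resources().
 */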
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
                          struct xgbe_ring *ring, unsigned int rdesc_count)
{
        DBGPR("-->xgbe_init_ring\n");

        if (!ring)
                return 0;

        /* Descriptors */
        ring->rdesc_count = rdesc_count;
        ring->rdesc = dma_alloc_coherent(pdata->dev,
                                         (sizeof(struct xgbe_ring_desc) *
                                          rdesc_count), &ring->rdesc_dma,
                                         GFP_KERNEL);
        if (!ring->rdesc)
                return -ENOMEM;

        /* Descriptor information */
        ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
                              GFP_KERNEL);
        if (!ring->rdata)
                return -ENOMEM;

        netif_dbg(pdata, drv, pdata->netdev,
                  "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
                  ring->rdesc, &ring->rdesc_dma, ring->rdata);

        DBGPR("<--xgbe_init_ring\n");

        return 0;
}
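
/* Allocate Tx and Rx ring memory for every channel, unwinding all
 * previously allocated rings on the first failure.
 */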
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;
        int ret;

        DBGPR("-->xgbe_alloc_ring_resources\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
                          channel->name);

                ret = xgbe_init_ring(pdata, channel->tx_ring,
                                     pdata->tx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
                                     "error initializing Tx ring\n");
                        goto err_ring;
                }

                netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
                          channel->name);

                ret = xgbe_init_ring(pdata, channel->rx_ring,
                                     pdata->rx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
                                     "error initializing Rx ring\n");
                        goto err_ring;
                }
        }

        DBGPR("<--xgbe_alloc_ring_resources\n");

        return 0;

err_ring:
        xgbe_free_ring_resources(pdata);

        return ret;
}
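
/* Allocate and DMA-map a group of pages for Rx buffer use, retrying with
 * progressively smaller orders until an allocation succeeds.
 */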
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
                            struct xgbe_page_alloc *pa, gfp_t gfp, int order)
{
        struct page *pages = NULL;
        dma_addr_t pages_dma;
        int ret;

        /* Try to obtain pages, decreasing order if necessary */
        gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
        while (order >= 0) {
                pages = alloc_pages(gfp, order);
                if (pages)
                        break;

                order--;
        }
        if (!pages)
                return -ENOMEM;

        /* Map the pages */
        pages_dma = dma_map_page(pdata->dev, pages, 0,
                                 PAGE_SIZE << order, DMA_FROM_DEVICE);
        ret = dma_mapping_error(pdata->dev, pages_dma);
        if (ret) {
                put_page(pages);
                return ret;
        }

        pa->pages = pages;
        pa->pages_len = PAGE_SIZE << order;
        pa->pages_offset = 0;
        pa->pages_dma = pages_dma;

        return 0;
}
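
/* Carve a buffer of @len bytes out of the current page allocation and
 * record it in @bd.  When no further @len-sized slice would fit, this
 * buffer takes ownership of unmapping the pages and the allocation is
 * cleared so a fresh one is obtained next time.
 */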
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
                                 struct xgbe_page_alloc *pa,
                                 unsigned int len)
{
        get_page(pa->pages);
        bd->pa = *pa;

        bd->dma_base = pa->pages_dma;
        bd->dma_off = pa->pages_offset;
        bd->dma_len = len;

        pa->pages_offset += len;
        if ((pa->pages_offset + len) > pa->pages_len) {
                /* This data descriptor is responsible for unmapping page(s) */
                bd->pa_unmap = *pa;

                /* Get a new allocation next time */
                pa->pages = NULL;
                pa->pages_len = 0;
                pa->pages_offset = 0;
                pa->pages_dma = 0;
        }
}
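
/* Assign header and data buffers to an Rx descriptor, allocating fresh
 * page groups when the previous ones have been used up.
 */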
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
                              struct xgbe_ring *ring,
                              struct xgbe_ring_data *rdata)
{
        int order, ret;

        if (!ring->rx_hdr_pa.pages) {
                ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
                if (ret)
                        return ret;
        }

        if (!ring->rx_buf_pa.pages) {
                order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
                ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
                                       order);
                if (ret)
                        return ret;
        }

        /* Set up the header page info */
        xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
                             XGBE_SKB_ALLOC_SIZE);

        /* Set up the buffer page info */
        xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
                             pdata->rx_buf_size);

        return 0;
}
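
/* Point each Tx descriptor-data entry at its hardware descriptor, reset
 * the ring state and let the hardware layer initialize the descriptors.
 */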
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        dma_addr_t rdesc_dma;
        unsigned int i, j;

        DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->tx_ring;
                if (!ring)
                        break;

                rdesc = ring->rdesc;
                rdesc_dma = ring->rdesc_dma;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);

                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;

                        rdesc++;
                        rdesc_dma += sizeof(struct xgbe_ring_desc);
                }

                ring->cur = 0;
                ring->dirty = 0;
                memset(&ring->tx, 0, sizeof(ring->tx));

                hw_if->tx_desc_init(channel);
        }

        DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}
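
/* Same as the Tx wrapper, except each Rx descriptor also gets header and
 * data buffers mapped for it.
 */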
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_ring_data *rdata;
        dma_addr_t rdesc_dma;
        unsigned int i, j;

        DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->rx_ring;
                if (!ring)
                        break;

                rdesc = ring->rdesc;
                rdesc_dma = ring->rdesc_dma;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);

                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;

                        if (xgbe_map_rx_buffer(pdata, ring, rdata))
                                break;

                        rdesc++;
                        rdesc_dma += sizeof(struct xgbe_ring_desc);
                }

                ring->cur = 0;
                ring->dirty = 0;

                hw_if->rx_desc_init(channel);
        }

        DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
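
/* Undo everything xgbe_map_tx_skb()/xgbe_map_rx_buffer() attached to a
 * descriptor-data entry: DMA mappings, the skb, page references and any
 * saved receive state.
 */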
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
                             struct xgbe_ring_data *rdata)
{
        if (rdata->skb_dma) {
                if (rdata->mapped_as_page) {
                        dma_unmap_page(pdata->dev, rdata->skb_dma,
                                       rdata->skb_dma_len, DMA_TO_DEVICE);
                } else {
                        dma_unmap_single(pdata->dev, rdata->skb_dma,
                                         rdata->skb_dma_len, DMA_TO_DEVICE);
                }
                rdata->skb_dma = 0;
                rdata->skb_dma_len = 0;
        }

        if (rdata->skb) {
                dev_kfree_skb_any(rdata->skb);
                rdata->skb = NULL;
        }

        if (rdata->rx.hdr.pa.pages)
                put_page(rdata->rx.hdr.pa.pages);

        if (rdata->rx.hdr.pa_unmap.pages) {
                dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
                               rdata->rx.hdr.pa_unmap.pages_len,
                               DMA_FROM_DEVICE);
                put_page(rdata->rx.hdr.pa_unmap.pages);
        }

        if (rdata->rx.buf.pa.pages)
                put_page(rdata->rx.buf.pa.pages);

        if (rdata->rx.buf.pa_unmap.pages) {
                dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
                               rdata->rx.buf.pa_unmap.pages_len,
                               DMA_FROM_DEVICE);
                put_page(rdata->rx.buf.pa_unmap.pages);
        }

        memset(&rdata->tx, 0, sizeof(rdata->tx));
        memset(&rdata->rx, 0, sizeof(rdata->rx));

        rdata->mapped_as_page = 0;

        if (rdata->state_saved) {
                rdata->state_saved = 0;
                rdata->state.skb = NULL;
                rdata->state.len = 0;
                rdata->state.error = 0;
        }
}
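
/* DMA-map an skb for transmit: reserve a slot for a context descriptor if
 * the MSS or VLAN tag changed, map the optional TSO header, then the linear
 * data and each fragment in XGBE_TX_MAX_BUF_SIZE chunks.  Returns the number
 * of descriptors used, or 0 on error after unwinding all mappings.
 */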
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_packet_data *packet;
        struct skb_frag_struct *frag;
        dma_addr_t skb_dma;
        unsigned int start_index, cur_index;
        unsigned int offset, tso, vlan, datalen, len;
        unsigned int i;

        DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

        offset = 0;
        start_index = ring->cur;
        cur_index = ring->cur;

        packet = &ring->packet_data;
        packet->rdesc_count = 0;
        packet->length = 0;

        tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                             TSO_ENABLE);
        vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              VLAN_CTAG);

        /* Save space for a context descriptor if needed */
        if ((tso && (packet->mss != ring->tx.cur_mss)) ||
            (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
                cur_index++;
        rdata = XGBE_GET_DESC_DATA(ring, cur_index);

        if (tso) {
                /* Map the TSO header */
                skb_dma = dma_map_single(pdata->dev, skb->data,
                                         packet->header_len, DMA_TO_DEVICE);
                if (dma_mapping_error(pdata->dev, skb_dma)) {
                        netdev_alert(pdata->netdev, "dma_map_single failed\n");
                        goto err_out;
                }
                rdata->skb_dma = skb_dma;
                rdata->skb_dma_len = packet->header_len;
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "skb header: index=%u, dma=%pad, len=%u\n",
                          cur_index, &skb_dma, packet->header_len);

                offset = packet->header_len;

                packet->length += packet->header_len;

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }

        /* Map the (remainder of the) packet */
        for (datalen = skb_headlen(skb) - offset; datalen; ) {
                len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

                skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
                                         DMA_TO_DEVICE);
                if (dma_mapping_error(pdata->dev, skb_dma)) {
                        netdev_alert(pdata->netdev, "dma_map_single failed\n");
                        goto err_out;
                }
                rdata->skb_dma = skb_dma;
                rdata->skb_dma_len = len;
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "skb data: index=%u, dma=%pad, len=%u\n",
                          cur_index, &skb_dma, len);

                datalen -= len;
                offset += len;

                packet->length += len;

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "mapping frag %u\n", i);

                frag = &skb_shinfo(skb)->frags[i];
                offset = 0;

                for (datalen = skb_frag_size(frag); datalen; ) {
                        len = min_t(unsigned int, datalen,
                                    XGBE_TX_MAX_BUF_SIZE);

                        skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
                                                   len, DMA_TO_DEVICE);
                        if (dma_mapping_error(pdata->dev, skb_dma)) {
                                netdev_alert(pdata->netdev,
                                             "skb_frag_dma_map failed\n");
                                goto err_out;
                        }
                        rdata->skb_dma = skb_dma;
                        rdata->skb_dma_len = len;
                        rdata->mapped_as_page = 1;
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "skb frag: index=%u, dma=%pad, len=%u\n",
                                  cur_index, &skb_dma, len);

                        datalen -= len;
                        offset += len;

                        packet->length += len;

                        cur_index++;
                        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                }
        }

        /* Save the skb address in the last entry.  We always have some data
         * that has been mapped so rdata is always advanced past the last
         * piece of mapped data - use the entry pointed to by cur_index - 1.
         */
        rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
        rdata->skb = skb;

        /* Save the number of descriptor entries used */
        packet->rdesc_count = cur_index - start_index;

        DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

        return packet->rdesc_count;

err_out:
        while (start_index < cur_index) {
                rdata = XGBE_GET_DESC_DATA(ring, start_index++);
                xgbe_unmap_rdata(pdata, rdata);
        }

        DBGPR("<--xgbe_map_tx_skb: count=0\n");

        return 0;
}
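
/* Populate the descriptor interface with the functions above. */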
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
        DBGPR("-->xgbe_init_function_ptrs_desc\n");

        desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
        desc_if->free_ring_resources = xgbe_free_ring_resources;
        desc_if->map_tx_skb = xgbe_map_tx_skb;
        desc_if->map_rx_buffer = xgbe_map_rx_buffer;
        desc_if->unmap_rdata = xgbe_unmap_rdata;
        desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
        desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

        DBGPR("<--xgbe_init_function_ptrs_desc\n");
}