/* Intel(R) Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */
#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_VERSION	"0.21.2-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
	"Copyright (c) 2013 - 2016 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue;
/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
	pr_info("%s\n", fm10k_copyright);

	/* create driver workqueue */
	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
					  fm10k_driver_name);

	fm10k_dbg_init();

	return fm10k_register_pci_driver();
}
module_init(fm10k_init_module);

/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	fm10k_dbg_exit();

	/* destroy driver workqueue */
	destroy_workqueue(fm10k_workqueue);
}
module_exit(fm10k_exit_module);
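
/**
 * fm10k_alloc_mapped_page - Allocate and DMA-map a page for an Rx buffer
 * @rx_ring: ring the buffer belongs to
 * @bi: Rx buffer to populate
 *
 * Allocates a fresh page and maps it for DMA from the device when the
 * buffer does not already own one. Returns true if the buffer is usable,
 * false if allocation or mapping failed (failures are counted in
 * rx_stats.alloc_failed).
 **/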
static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}
/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}
/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static inline bool fm10k_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
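
/**
 * fm10k_can_reuse_rx_page - decide whether a half-page buffer can be recycled
 * @rx_buffer: buffer containing the page
 * @page: page pulled from the buffer
 * @truesize: size of the region just consumed (used when PAGE_SIZE >= 8192)
 *
 * Flips (or advances) the page offset and bumps the page reference count so
 * the unused portion of the page can be handed back to hardware. Returns
 * false if the page is reserved/remote or there is no room left to reuse it.
 **/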
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int __maybe_unused truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(fm10k_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	page_ref_inc(page);

	return true;
}
/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
	unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, 512);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= FM10K_RX_HDR_LEN)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!fm10k_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}
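
/**
 * fm10k_fetch_rx_buffer - pull the current Rx buffer contents into an skb
 * @rx_ring: ring the buffer lives on
 * @rx_desc: descriptor describing the buffer
 * @skb: skb being built, or NULL if a new one must be allocated
 *
 * Syncs the buffer for CPU access, copies or attaches its contents to the
 * skb, and either recycles the page back to the ring or unmaps it. Returns
 * NULL if an skb could not be allocated.
 **/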
static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      FM10K_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}
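
/**
 * fm10k_rx_checksum - set the skb checksum state from the Rx descriptor
 * @ring: ring the packet arrived on
 * @rx_desc: Rx descriptor written back by hardware
 * @skb: skb being populated
 *
 * Marks the skb CHECKSUM_UNNECESSARY when the descriptor reports a valid
 * L4 checksum (outer, or inner for tunneled frames), and counts checksum
 * errors otherwise.
 **/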
static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (fm10k_test_staterr(rx_desc,
			       FM10K_RXD_STATUS_L4E |
			       FM10K_RXD_STATUS_L4E2 |
			       FM10K_RXD_STATUS_IPE |
			       FM10K_RXD_STATUS_IPE2)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
		skb->encapsulation = true;
	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ring->rx_stats.csum_good++;
}

#define FM10K_RSS_L4_TYPES_MASK \
	(BIT(FM10K_RSSTYPE_IPV4_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV4_UDP) | \
	 BIT(FM10K_RSSTYPE_IPV6_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV6_UDP))
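
/**
 * fm10k_rx_hash - record the RSS hash reported by hardware in the skb
 * @ring: ring the packet arrived on
 * @rx_desc: Rx descriptor carrying the hash value and RSS type
 * @skb: skb being populated
 *
 * The hash is tagged as an L4 hash only for the TCP/UDP RSS types listed
 * in FM10K_RSS_L4_TYPES_MASK; everything else is reported as an L3 hash.
 **/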
static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
		     (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
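
/**
 * fm10k_type_trans - resolve the receiving net_device and packet protocol
 * @rx_ring: ring the packet arrived on
 * @rx_desc: Rx descriptor, used to look up the DGLORT and header info
 * @skb: skb being populated
 *
 * If the DGLORT maps to an offloaded MACVLAN, the skb is attributed to that
 * macvlan device and its statistics are updated; otherwise the skb stays on
 * the lower netdev.
 **/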
static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (!l2_accel)
		return;

	/* update MACVLAN statistics */
	macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
			 !!(rx_desc->w.hdr_info &
			    cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
}

/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	skb_record_rx_queue(skb, rx_ring->queue_index);

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}
/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}

/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
#define FM10K_TEST_RXD_BIT(rxd, bit) \
	((rxd)->w.csum_err & cpu_to_le16(bit))
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
			rx_ring->rx_stats.switch_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
			rx_ring->rx_stats.drops++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
			rx_ring->rx_stats.pp_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
			rx_ring->rx_stats.link_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
			rx_ring->rx_stats.length_errors++;
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}
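
/**
 * fm10k_clean_rx_irq - clean completed Rx descriptors from a ring
 * @q_vector: q_vector the ring belongs to
 * @rx_ring: Rx ring to clean
 * @budget: maximum number of packets to process
 *
 * Walks the ring until the budget is exhausted or no more completed
 * descriptors are found, handing finished frames to the GRO layer and
 * returning replacement buffers to hardware in batches. Returns the
 * number of packets cleaned.
 **/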
static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			      struct fm10k_ring *rx_ring,
			      int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->d.staterr)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets;
}
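
/* A frame is treated as VXLAN only if its destination UDP port matches the
 * first port on the interface's vxlan_port list; the inner Ethernet header
 * then starts VXLAN_HLEN bytes past the transport header (UDP header plus
 * the 8-byte VXLAN header).
 */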
#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
	struct fm10k_intfc *interface = netdev_priv(skb->dev);
	struct fm10k_udp_port *vxlan_port;

	/* we can only offload a vxlan if we recognize it as such */
	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
					      struct fm10k_udp_port, list);

	if (!vxlan_port)
		return NULL;
	if (vxlan_port->port != udp_hdr(skb)->dest)
		return NULL;

	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
}

#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
#define NVGRE_TNI htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
	struct fm10k_nvgre_hdr *nvgre_hdr;
	int hlen = ip_hdrlen(skb);

	/* currently only IPv4 is supported due to hlen above */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return NULL;

	/* our transport header should be NVGRE */
	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);

	/* verify all reserved flags are 0 */
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);

	return (struct ethhdr *)(&nvgre_hdr->tni);
}
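
/**
 * fm10k_tx_encap_offload - verify a tunneled frame can be offloaded
 * @skb: frame being transmitted
 *
 * Checks that the frame is a VXLAN or NVGRE tunnel the hardware understands
 * and that the combined inner and outer headers fit within
 * FM10K_TUNNEL_HEADER_LENGTH. Returns the inner EtherType on success, or 0
 * if the frame cannot be offloaded.
 **/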
__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
	struct ethhdr *eth_hdr;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		eth_hdr = fm10k_port_is_vxlan(skb);
		break;
	case IPPROTO_GRE:
		eth_hdr = fm10k_gre_is_nvgre(skb);
		break;
	default:
		return 0;
	}

	if (!eth_hdr)
		return 0;

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (inner_l4_hdr) {
	case IPPROTO_TCP:
		inner_l4_hlen = inner_tcp_hdrlen(skb);
		break;
	case IPPROTO_UDP:
		inner_l4_hlen = 8;
		break;
	default:
		return 0;
	}

	/* The hardware allows tunnel offloads only if the combined inner and
	 * outer header is 184 bytes or less
	 */
	if (skb_inner_transport_header(skb) + inner_l4_hlen -
	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
		return 0;

	return eth_hdr->h_proto;
}
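
/**
 * fm10k_tso - prepare a Tx descriptor for TCP segmentation offload
 * @tx_ring: ring the frame will be transmitted on
 * @first: first tx_buffer of the frame
 *
 * Computes the header length (outer, or inner for tunnels), updates the GSO
 * accounting on @first, and writes hdrlen/mss into the Tx descriptor.
 * Returns 1 if TSO was set up, 0 if the frame does not need it, or -1 if an
 * unsupported tunnel was requested.
 **/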
static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	unsigned char *th;
	u8 hdrlen;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* compute header lengths */
	if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}

	/* compute offset from SOF to transport header and add header len */
	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);

	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * hdrlen;

	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;

err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}
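
/**
 * fm10k_tx_csum - set up Tx checksum offload for a frame
 * @tx_ring: ring the frame will be transmitted on
 * @first: first tx_buffer of the frame
 *
 * Validates that the L4 protocol (outer, or inner for tunnels) is one the
 * hardware can checksum; otherwise the checksum is computed in software via
 * skb_checksum_help(). Also clears the descriptor hdrlen/mss fields for
 * non-TSO frames.
 **/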
static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	union {
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		u8 *raw;
	} network_hdr;
	u8 *transport_hdr;
	__be16 frag_off;
	__be16 protocol;
	u8 l4_hdr = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	if (skb->encapsulation) {
		protocol = fm10k_tx_encap_offload(skb);
		if (!protocol) {
			if (skb_checksum_help(skb)) {
				dev_warn(tx_ring->dev,
					 "failed to offload encap csum!\n");
				tx_ring->tx_stats.csum_err++;
			}
			goto no_csum;
		}
		network_hdr.raw = skb_inner_network_header(skb);
		transport_hdr = skb_inner_transport_header(skb);
	} else {
		protocol = vlan_get_protocol(skb);
		network_hdr.raw = skb_network_header(skb);
		transport_hdr = skb_transport_header(skb);
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		l4_hdr = network_hdr.ipv4->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = network_hdr.ipv6->nexthdr;
		if (likely((transport_hdr - network_hdr.raw) ==
			   sizeof(struct ipv6hdr)))
			break;
		ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
				 sizeof(struct ipv6hdr),
				 &l4_hdr, &frag_off);
		if (unlikely(frag_off))
			l4_hdr = NEXTHDR_FRAGMENT;
		break;
	default:
		break;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	case IPPROTO_GRE:
		if (skb->encapsulation)
			break;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum, version=%d l4 proto=%x\n",
				 protocol, l4_hdr);
		}
		skb_checksum_help(skb);
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	/* update TX checksum flag */
	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
	tx_ring->tx_stats.csum_good++;

no_csum:
	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = 0;
	tx_desc->mss = 0;
}
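
/* FM10K_SET_FLAG translates a bit in tx_flags into the corresponding bit in
 * the descriptor flags byte, scaling with a multiply or a divide depending
 * on whether the source bit sits below or above the destination bit.
 */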
#define FM10K_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 desc_flags = 0;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}
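
/* __fm10k_maybe_stop_tx() stops the queue, issues a memory barrier, and then
 * re-checks for free descriptors so a completion that has just freed space
 * is not missed. The inline fm10k_maybe_stop_tx() wrapper keeps the common
 * "plenty of room" case cheap.
 */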
static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;

	return __fm10k_maybe_stop_tx(tx_ring, size);
}
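
/**
 * fm10k_tx_map - map a frame's buffers and write its Tx descriptors
 * @tx_ring: ring the frame is transmitted on
 * @first: first tx_buffer of the frame
 *
 * DMA-maps the skb head and each fragment, splitting buffers larger than
 * FM10K_MAX_DATA_PER_TXD across multiple descriptors, then writes the tail
 * register unless further frames are expected (xmit_more). On a mapping
 * error all mappings for the frame are unwound.
 **/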
static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (skb_vlan_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
					       FM10K_MAX_DATA_PER_TXD, flags)) {
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += FM10K_MAX_DATA_PER_TXD;
			size -= FM10K_MAX_DATA_PER_TXD;
		}

		if (likely(!data_len))
			break;

		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
				       dma, size, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer[i];
	}

	/* write last descriptor with LAST bit set */
	flags |= FM10K_TXD_FLAG_LAST;

	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
		i = 0;

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
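
/**
 * fm10k_xmit_frame_ring - transmit a frame on a specific Tx ring
 * @skb: frame to transmit
 * @tx_ring: ring selected for this frame
 *
 * Reserves enough descriptors for the frame (or returns NETDEV_TX_BUSY),
 * records the initial tx_buffer state, and hands the frame to the TSO,
 * checksum, and mapping helpers above.
 **/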
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	struct fm10k_tx_buffer *first;
	unsigned short f;
	u32 tx_flags = 0;
	int tso;

	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];

	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;

	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

	fm10k_tx_map(tx_ring, first);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}
static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
{
	return ring->stats.packets;
}

/**
 * fm10k_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring structure
 * @in_sw: is tx_pending being checked in SW or in HW?
 */
u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
{
	struct fm10k_intfc *interface = ring->q_vector->interface;
	struct fm10k_hw *hw = &interface->hw;
	u32 head, tail;

	if (likely(in_sw)) {
		head = ring->next_to_clean;
		tail = ring->next_to_use;
	} else {
		head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
		tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
	}

	return ((head <= tail) ? tail : tail + ring->count) - head;
}

bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
	u32 tx_done = fm10k_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. By
	 * requiring this to fail twice we avoid races with
	 * clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (!tx_pending || (tx_done_old != tx_done)) {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);

		return false;
	}

	/* make sure it is true for two checks in a row */
	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
}
/**
 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
 * @interface: driver private struct
 **/
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__FM10K_DOWN, &interface->state)) {
		interface->tx_timeout_count++;
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
		fm10k_service_event_schedule(interface);
	}
}

/**
 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *tx_ring, int napi_budget)
{
	struct fm10k_intfc *interface = q_vector->interface;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__FM10K_DOWN, &interface->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer[i];
	tx_desc = FM10K_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer;
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer;
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);

	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct fm10k_hw *hw = &interface->hw;

		netif_err(interface, drv, tx_ring->netdev,
			  "Detected Tx Unit Hang\n"
			  " Tx Queue <%d>\n"
			  " TDH, TDT <%x>, <%x>\n"
			  " next_to_use <%x>\n"
			  " next_to_clean <%x>\n",
			  tx_ring->queue_index,
			  fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
			  fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
			  tx_ring->next_to_use, i);

		netif_stop_subqueue(tx_ring->netdev,
				    tx_ring->queue_index);

		netif_info(interface, probe, tx_ring->netdev,
			   "tx hang %d detected on queue %d, resetting interface\n",
			   interface->tx_timeout_count + 1,
			   tx_ring->queue_index);

		fm10k_tx_timeout_reset(interface);

		/* the netdev is about to reset, no point in enabling stuff */
		return true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__FM10K_DOWN, &interface->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * fm10k_update_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size. The
 * divisors and thresholds used by this function were determined based
 * on theoretical maximum wire speed and testing data, in order to
 * minimize response time while increasing bulk throughput.
 *
 * @ring_container: Container for rings to have ITR updated
 **/
static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
{
	unsigned int avg_wire_size, packets, itr_round;

	/* Only update ITR if we are using adaptive setting */
	if (!ITR_IS_ADAPTIVE(ring_container->itr))
		goto clear_counts;

	packets = ring_container->total_packets;
	if (!packets)
		goto clear_counts;

	avg_wire_size = ring_container->total_bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (34 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 360) {
		/* Start at 250K ints/sec and gradually drop to 77K ints/sec */
		avg_wire_size *= 8;
		avg_wire_size += 376;
	} else if (avg_wire_size <= 1152) {
		/* 77K ints/sec to 45K ints/sec */
		avg_wire_size *= 3;
		avg_wire_size += 2176;
	} else if (avg_wire_size <= 1920) {
		/* 45K ints/sec to 38K ints/sec */
		avg_wire_size += 4480;
	} else {
		/* plateau at a limit of 38K ints/sec */
		avg_wire_size = 6656;
	}

	/* Perform final bitshift for division after rounding up to ensure
	 * that the calculation will never get below a 1. The bit shift
	 * accounts for changes in the ITR due to PCIe link speed.
	 */
	itr_round = READ_ONCE(ring_container->itr_scale) + 8;
	avg_wire_size += BIT(itr_round) - 1;
	avg_wire_size >>= itr_round;

	/* write back value and retain adaptive flag */
	ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;

clear_counts:
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;
}
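
/**
 * fm10k_qv_enable - re-enable interrupts for a q_vector
 * @q_vector: q_vector to re-arm
 *
 * Recomputes the adaptive Tx and Rx ITR values and writes them, together
 * with the enable/auto-mask bit, to the vector's ITR register.
 **/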
static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
{
	/* Enable auto-mask and clear the current mask */
	u32 itr = FM10K_ITR_ENABLE;

	/* Update Tx ITR */
	fm10k_update_itr(&q_vector->tx);

	/* Update Rx ITR */
	fm10k_update_itr(&q_vector->rx);

	/* Store Tx itr in timer slot 0 */
	itr |= (q_vector->tx.itr & FM10K_ITR_MAX);

	/* Shift Rx itr to timer slot 1 */
	itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;

	/* Write the final value to the ITR register */
	writel(itr, q_vector->itr);
}
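
/**
 * fm10k_poll - NAPI poll routine for a q_vector
 * @napi: NAPI structure embedded in the q_vector
 * @budget: number of Rx packets this poll may process
 *
 * Cleans all Tx rings on the vector, splits the Rx budget across the Rx
 * rings, and only re-enables the vector's interrupt once all rings report
 * their work complete.
 **/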
static int fm10k_poll(struct napi_struct *napi, int budget)
{
	struct fm10k_q_vector *q_vector =
			       container_of(napi, struct fm10k_q_vector, napi);
	struct fm10k_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	fm10k_for_each_ring(ring, q_vector->tx) {
		if (!fm10k_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget / q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	fm10k_for_each_ring(ring, q_vector->rx) {
		int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);

		work_done += work;
		if (work >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	napi_complete_done(napi, work_done);

	/* re-enable the q_vector */
	fm10k_qv_enable(q_vector);

	return min(work_done, budget - 1);
}
/**
 * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device
 * @interface: board private structure to initialize
 *
 * When QoS (Quality of Service) is enabled, allocate queues for
 * each traffic class. If multiqueue isn't available, then abort QoS
 * initialization.
 *
 * This function handles all combinations of Qos and RSS.
 *
 **/
  1235. static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
  1236. {
  1237. struct net_device *dev = interface->netdev;
  1238. struct fm10k_ring_feature *f;
  1239. int rss_i, i;
  1240. int pcs;
  1241. /* Map queue offset and counts onto allocated tx queues */
  1242. pcs = netdev_get_num_tc(dev);
  1243. if (pcs <= 1)
  1244. return false;
  1245. /* set QoS mask and indices */
  1246. f = &interface->ring_feature[RING_F_QOS];
  1247. f->indices = pcs;
  1248. f->mask = BIT(fls(pcs - 1)) - 1;
  1249. /* determine the upper limit for our current DCB mode */
  1250. rss_i = interface->hw.mac.max_queues / pcs;
  1251. rss_i = BIT(fls(rss_i) - 1);
  1252. /* set RSS mask and indices */
  1253. f = &interface->ring_feature[RING_F_RSS];
  1254. rss_i = min_t(u16, rss_i, f->limit);
  1255. f->indices = rss_i;
  1256. f->mask = BIT(fls(rss_i - 1)) - 1;
  1257. /* configure pause class to queue mapping */
  1258. for (i = 0; i < pcs; i++)
  1259. netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
  1260. interface->num_rx_queues = rss_i * pcs;
  1261. interface->num_tx_queues = rss_i * pcs;
  1262. return true;
  1263. }
  1264. /**
  1265. * fm10k_set_rss_queues: Allocate queues for RSS
  1266. * @interface: board private structure to initialize
  1267. *
  1268. * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
  1269. * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
  1270. *
  1271. **/
  1272. static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
  1273. {
  1274. struct fm10k_ring_feature *f;
  1275. u16 rss_i;
  1276. f = &interface->ring_feature[RING_F_RSS];
  1277. rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);
  1278. /* record indices and power of 2 mask for RSS */
  1279. f->indices = rss_i;
  1280. f->mask = BIT(fls(rss_i - 1)) - 1;
  1281. interface->num_rx_queues = rss_i;
  1282. interface->num_tx_queues = rss_i;
  1283. return true;
  1284. }
  1285. /**
  1286. * fm10k_set_num_queues: Allocate queues for device, feature dependent
  1287. * @interface: board private structure to initialize
  1288. *
  1289. * This is the top level queue allocation routine. The order here is very
  1290. * important, starting with the "most" number of features turned on at once,
  1291. * and ending with the smallest set of features. This way large combinations
  1292. * can be allocated if they're turned on, and smaller combinations are the
  1293. * fallthrough conditions.
  1294. *
  1295. **/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
	/* Attempt to setup QoS and RSS first */
	if (fm10k_set_qos_queues(interface))
		return;

	/* If we don't have QoS, just fallback to only RSS. */
	fm10k_set_rss_queues(interface);
}

/**
 * fm10k_reset_num_queues - Reset the number of queues to zero
 * @interface: board private structure
 *
 * This function should be called whenever we need to reset the number of
 * queues after an error condition.
 */
static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
{
	interface->num_tx_queues = 0;
	interface->num_rx_queues = 0;
	interface->num_q_vectors = 0;
}

/**
 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
 * @interface: board private structure to initialize
 * @v_count: q_vectors allocated on interface, used for ring interleaving
 * @v_idx: index of vector in interface struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
				unsigned int v_count, unsigned int v_idx,
				unsigned int txr_count, unsigned int txr_idx,
				unsigned int rxr_count, unsigned int rxr_idx)
{
	struct fm10k_q_vector *q_vector;
	struct fm10k_ring *ring;
	int ring_count, size;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct fm10k_q_vector) +
	       (sizeof(struct fm10k_ring) * ring_count);

	/* allocate q_vector and rings */
	q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(interface->netdev, &q_vector->napi,
		       fm10k_poll, NAPI_POLL_WEIGHT);

	/* tie q_vector and interface together */
	interface->q_vector[v_idx] = q_vector;
	q_vector->interface = interface;
	q_vector->v_idx = v_idx;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* save Tx ring container info */
	q_vector->tx.ring = ring;
	q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
	q_vector->tx.itr = interface->tx_itr;
	q_vector->tx.itr_scale = interface->hw.mac.itr_scale;
	q_vector->tx.count = txr_count;
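
	/* rings owned by this vector are interleaved across the interface:
	 * each ring's queue_index advances by v_count, so consecutive
	 * queue indices land on different q_vectors
	 */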
	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Tx specific ring traits */
		ring->count = interface->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to interface */
		interface->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	/* save Rx ring container info */
	q_vector->rx.ring = ring;
	q_vector->rx.itr = interface->rx_itr;
	q_vector->rx.itr_scale = interface->hw.mac.itr_scale;
	q_vector->rx.count = rxr_count;

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;
		rcu_assign_pointer(ring->l2_accel, interface->l2_accel);

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Rx specific ring traits */
		ring->count = interface->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to interface */
		interface->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	fm10k_dbg_q_vector_init(q_vector);

	return 0;
}

/**
 * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
 * @interface: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
{
	struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
	struct fm10k_ring *ring;

	fm10k_dbg_q_vector_exit(q_vector);

	fm10k_for_each_ring(ring, q_vector->tx)
		interface->tx_ring[ring->queue_index] = NULL;

	fm10k_for_each_ring(ring, q_vector->rx)
		interface->rx_ring[ring->queue_index] = NULL;

	interface->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);
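
	/* defer the actual free until an RCU grace period has elapsed so
	 * that any reader still holding a reference to this q_vector or to
	 * the rings embedded in it cannot touch freed memory
	 */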
	kfree_rcu(q_vector, rcu);
}

/**
 * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
 * @interface: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
{
	unsigned int q_vectors = interface->num_q_vectors;
	unsigned int rxr_remaining = interface->num_rx_queues;
	unsigned int txr_remaining = interface->num_tx_queues;
	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}
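
	/* spread any remaining rings as evenly as possible over the
	 * vectors that are left; recomputing DIV_ROUND_UP against the
	 * remaining vector count re-balances the share on every iteration
	 */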
	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	fm10k_reset_num_queues(interface);

	while (v_idx--)
		fm10k_free_q_vector(interface, v_idx);

	return -ENOMEM;
}

/**
 * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
 * @interface: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
{
	int v_idx = interface->num_q_vectors;

	fm10k_reset_num_queues(interface);

	while (v_idx--)
		fm10k_free_q_vector(interface, v_idx);
}

/**
 * fm10k_reset_msix_capability - reset MSI-X capability
 * @interface: board private structure to initialize
 *
 * Reset the MSI-X capability back to its starting state
 **/
static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
{
	pci_disable_msix(interface->pdev);
	kfree(interface->msix_entries);
	interface->msix_entries = NULL;
}

/**
 * fm10k_init_msix_capability - configure MSI-X capability
 * @interface: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int v_budget, vector;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs. So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
	v_budget = min_t(u16, v_budget, num_online_cpus());

	/* account for vectors not related to queues */
	v_budget += NON_Q_VECTORS(hw);
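	/* NON_Q_VECTORS covers interrupts that are not tied to a ring pair,
	 * e.g. the mailbox interrupt
	 */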

	/* At the same time, hardware can only support a maximum of
	 * hw->mac.max_msix_vectors vectors. With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device. Thus, we cap it off in
	 * those rare cases where the CPU count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation is fatal. */
	interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!interface->msix_entries)
		return -ENOMEM;

	/* populate entry values */
	for (vector = 0; vector < v_budget; vector++)
		interface->msix_entries[vector].entry = vector;

	/* Attempt to enable MSI-X with requested value */
	v_budget = pci_enable_msix_range(interface->pdev,
					 interface->msix_entries,
					 MIN_MSIX_COUNT(hw),
					 v_budget);
	if (v_budget < 0) {
		kfree(interface->msix_entries);
		interface->msix_entries = NULL;
		return v_budget;
	}
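
	/* pci_enable_msix_range() may grant fewer vectors than requested,
	 * as few as MIN_MSIX_COUNT(hw); v_budget now holds the number of
	 * vectors actually allocated
	 */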
	/* record the number of queues available for q_vectors */
	interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);

	return 0;
}

/**
 * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for QoS
 **/
static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	int pc, offset, rss_i, i, q_idx;
	u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
	u8 num_pcs = netdev_get_num_tc(dev);

	if (num_pcs <= 1)
		return false;

	rss_i = interface->ring_feature[RING_F_RSS].indices;
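
	/* the register index for RSS slot i of pause class pc works out to
	 * pc + i * pc_stride, so neighbouring register queues belong to
	 * different pause classes
	 */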
	for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
		q_idx = pc;
		for (i = 0; i < rss_i; i++) {
			interface->tx_ring[offset + i]->reg_idx = q_idx;
			interface->tx_ring[offset + i]->qos_pc = pc;
			interface->rx_ring[offset + i]->reg_idx = q_idx;
			interface->rx_ring[offset + i]->qos_pc = pc;
			q_idx += pc_stride;
		}
	}

	return true;
}

/**
 * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for RSS
 **/
static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		interface->rx_ring[i]->reg_idx = i;

	for (i = 0; i < interface->num_tx_queues; i++)
		interface->tx_ring[i]->reg_idx = i;
}

/**
 * fm10k_assign_rings - Map rings to network devices
 * @interface: Interface structure containing rings and devices
 *
 * This function is meant to go through and configure both the network
 * devices so that they contain rings, and configure the rings so that
 * they function with their network devices.
 **/
static void fm10k_assign_rings(struct fm10k_intfc *interface)
{
	if (fm10k_cache_ring_qos(interface))
		return;

	fm10k_cache_ring_rss(interface);
}

static void fm10k_init_reta(struct fm10k_intfc *interface)
{
	u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
	u32 reta;

	/* If the Rx flow indirection table has been configured manually, we
	 * need to maintain it when possible.
	 */
	if (netif_is_rxfh_configured(interface->netdev)) {
		for (i = FM10K_RETA_SIZE; i--;) {
			reta = interface->reta[i];
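
			/* each 32-bit RETA entry packs four 8-bit queue
			 * indices; the shifts below isolate one byte at a
			 * time and verify it is within the RSS queue count
			 */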
			if ((((reta << 24) >> 24) < rss_i) &&
			    (((reta << 16) >> 24) < rss_i) &&
			    (((reta << 8) >> 24) < rss_i) &&
			    (((reta) >> 24) < rss_i))
				continue;

			/* this should never happen */
			dev_err(&interface->pdev->dev,
				"RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n");
			goto repopulate_reta;
		}

		/* do nothing if all of the elements are in bounds */
		return;
	}

repopulate_reta:
	fm10k_write_reta(interface, NULL);
}

/**
 * fm10k_init_queueing_scheme - Determine proper queueing scheme
 * @interface: board private structure to initialize
 *
 * We determine which queueing scheme to use based on...
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
{
	int err;

	/* Number of supported queues */
	fm10k_set_num_queues(interface);

	/* Configure MSI-X capability */
	err = fm10k_init_msix_capability(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to initialize MSI-X capability\n");
		goto err_init_msix;
	}

	/* Allocate memory for queues */
	err = fm10k_alloc_q_vectors(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to allocate queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* Map rings to devices, and map devices to physical queues */
	fm10k_assign_rings(interface);

	/* Initialize RSS redirection table */
	fm10k_init_reta(interface);

	return 0;

err_alloc_q_vectors:
	fm10k_reset_msix_capability(interface);
err_init_msix:
	fm10k_reset_num_queues(interface);
	return err;
}

/**
 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
 * @interface: board private structure to clear queueing scheme on
 *
 * We go through and clear queueing specific resources and reset the structure
 * to pre-load conditions
 **/
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
{
	fm10k_free_q_vectors(interface);
	fm10k_reset_msix_capability(interface);
}