enic_main.c
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#include <linux/numa.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/crash_dump.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
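/* Worked example (illustrative; assumes WQ_ENET_LEN_BITS == 14 as in
 * the vnic wq headers): WQ_ENET_MAX_DESC_LEN = 1 << 14 = 16384 bytes
 * per descriptor and MAX_TSO = 65536, so a maximal TSO send can split
 * into 65536 / 16384 + 1 = 5 descriptors.  This bound is used below
 * both to stop the TX queue and to decide when it is safe to wake it.
 */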
#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT	256

/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD	1000
#define ENIC_MAX_COALESCE_TIMERS	10

/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000,  0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5820, 50},
	{6120, 60},
	{6435, 70},
	{6745, 80},
	{7000, 90},
	{0xFFFFFFFF, 100}
};

/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};
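/* How the two tables combine (an illustrative reading; the link-speed
 * selection itself happens outside this section): mod_range picks, per
 * link speed, the start points of the coalescing-timer range for
 * small- and large-packet traffic, and enic_calc_int_moderation()
 * below uses the measured rx rate to index mod_table and take that
 * percentage of the selected range as the new timer value.
 */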
static void enic_init_affinity_hint(struct enic *enic)
{
	int numa_node = dev_to_node(&enic->pdev->dev);
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
		    (enic->msix[i].affinity_mask &&
		     !cpumask_empty(enic->msix[i].affinity_mask)))
			continue;
		if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
				       GFP_KERNEL))
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					enic->msix[i].affinity_mask);
	}
}

static void enic_free_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
			continue;
		free_cpumask_var(enic->msix[i].affinity_mask);
	}
}

static void enic_set_affinity_hint(struct enic *enic)
{
	int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) ||
		    enic_is_notify_intr(enic, i) ||
		    !enic->msix[i].affinity_mask ||
		    cpumask_empty(enic->msix[i].affinity_mask))
			continue;
		err = irq_set_affinity_hint(enic->msix_entry[i].vector,
					    enic->msix[i].affinity_mask);
		if (err)
			netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
				    err);
	}

	for (i = 0; i < enic->wq_count; i++) {
		int wq_intr = enic_msix_wq_intr(enic, i);

		if (enic->msix[wq_intr].affinity_mask &&
		    !cpumask_empty(enic->msix[wq_intr].affinity_mask))
			netif_set_xps_queue(enic->netdev,
					    enic->msix[wq_intr].affinity_mask,
					    i);
	}
}

static void enic_unset_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++)
		irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
}

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
				 buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			       buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
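/* Note on the wake threshold above: the queue is restarted only once
 * there is room for a worst-case send, i.e. MAX_SKB_FRAGS fragments
 * plus ENIC_DESC_MAX_SPLITS descriptors for a maximal TSO split.
 * This mirrors the stop condition in enic_hard_start_xmit() and
 * avoids bouncing the queue on and off under load.
 */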
static bool enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;
	bool err = false;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				   i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				   i, error_status);
	}

	return err;
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				    min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					    "interface MTU (%d) set higher than switch port MTU (%d)\n",
					    netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		enic_notify_check(enic);
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr))
		napi_schedule_irqoff(&enic->napi[0]);
	else
		vnic_intr_unmask(&enic->intr[io_intr]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule_irqoff(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	if (enic_log_q_error(enic))
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	enic_notify_check(enic);
	vnic_intr_return_all_credits(&enic->intr[intr]);

	return IRQ_HANDLED;
}

static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
				  struct sk_buff *skb, unsigned int len_left,
				  int loopback)
{
	const skb_frag_t *frag;
	dma_addr_t dma_addr;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
					(len_left == 0),	/* EOP? */
					loopback);
	}

	return 0;
}

static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
				  struct sk_buff *skb, int vlan_tag_insert,
				  unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
			   vlan_tag, eop, loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
				     struct sk_buff *skb, int vlan_tag_insert,
				     unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
				   hdr_len, vlan_tag_insert, vlan_tag, eop,
				   loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
				 struct sk_buff *skb, unsigned int mss,
				 int vlan_tag_insert, unsigned int vlan_tag,
				 int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
					  PCI_DMA_TODEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
				       vlan_tag_insert, vlan_tag,
				       eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return 0;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				  (unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			if (unlikely(enic_dma_map_check(enic, dma_addr)))
				return -ENOMEM;
			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						(len == frag_len_left),	/* EOP? */
						loopback);
			frag_len_left -= len;
			offset += len;
		}
	}

	return 0;
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;
	int err;

	if (skb_vlan_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = skb_vlan_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
					    vlan_tag_insert, vlan_tag,
					    loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
						vlan_tag, loopback);
	else
		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
					     vlan_tag, loopback);
	if (unlikely(err)) {
		struct vnic_wq_buf *buf;

		buf = wq->to_use->prev;
		/* while not EOP of previous pkt && queue not empty.
		 * For all non EOP bufs, os_buf is NULL.
		 */
		while (!buf->os_buf && (buf->next != wq->to_clean)) {
			enic_free_wq_buf(wq, buf);
			wq->ring.desc_avail++;
			buf = buf->prev;
		}
		wq->to_use = buf->next;
		dev_kfree_skb(skb);
	}
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned int txq_map;
	struct netdev_queue *txq;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);
	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */
	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&enic->wq_lock[txq_map]);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock(&enic->wq_lock[txq_map]);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

	spin_unlock(&enic->wq_lock[txq_map]);

	return NETDEV_TX_OK;
}
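/* Doorbell batching note: when skb->xmit_more is set, the stack has
 * more packets queued for this txq, so the doorbell write above is
 * deferred until the last skb of the burst (or until the queue is
 * stopped), amortizing one MMIO write over the whole burst.
 */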
/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
						struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;
	int err;

	err = enic_dev_stats_dump(enic, &stats);
	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return net_stats;

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}
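/* Filter fallback note: when the requested unicast (or multicast)
 * addresses exceed the perfect-filter capacity, enic_set_rx_mode()
 * falls back to promiscuous (or all-multicast) mode instead of
 * silently dropping addresses, and skips the per-address sync calls
 * that could no longer all succeed.
 */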
/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);

	schedule_work(&enic->tx_hang_reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			eth_zero_addr(pp->mac_addr);
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	}

	if (vf == PORT_SELF_VF)
		eth_zero_addr(pp->vf_mac);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;

	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data, len,
				  PCI_DMA_FROMDEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	enic_queue_rq_desc(rq, skb, os_buf_index,
			   dma_addr, len);

	return 0;
}

static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
				    DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}
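/* Copybreak rationale: for packets no longer than rx_copybreak
 * (RX_COPYBREAK_DEFAULT is 256 bytes), copying into a freshly
 * allocated small skb is cheaper than unmapping the DMA buffer and
 * refilling the ring with a new full-size one; on success the
 * original buffer stays mapped and is recycled by enic_rq_alloc_buf().
 */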
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data - NET_IP_ALIGN);

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}
		/* Hardware does not provide a whole-packet checksum, only a
		 * pseudo checksum: the hw validates the packet checksum but
		 * does not hand us the checksum value, so use
		 * CHECKSUM_UNNECESSARY.
		 */
		if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
		    ipv4_csum_ok)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (enic_poll_busy_polling(rq) ||
		    !(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}

static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);

	/* Damping */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}
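/* Worked example (illustrative numbers): 30000 bytes seen over a
 * delta of 100 us gives traffic = (30000 << 3) / 100 = 2400 Mbps,
 * which is below mod_table[0].rx_rate (4000), so index 0 selects 0%
 * of the [range_start, range_end] window, i.e. the smallest timer in
 * the range; the final >> 1 averaging then damps the change.
 */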
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
		if (wq_work_done > 0)
			vnic_intr_return_credits(&enic->intr[intr],
						 wq_work_done,
						 0 /* don't unmask intr */,
						 0 /* don't reset intr timer */);
		return budget;
	}

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);
	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */
  1097. work_done = rq_work_done + wq_work_done;
  1098. if (work_done > 0)
  1099. vnic_intr_return_credits(&enic->intr[intr],
  1100. work_done,
  1101. 0 /* don't unmask intr */,
  1102. 0 /* don't reset intr timer */);
  1103. err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
  1104. enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
  1105. /* Buffer allocation failed. Stay in polling
  1106. * mode so we can try to fill the ring again.
  1107. */
  1108. if (err)
  1109. rq_work_done = rq_work_to_do;
  1110. if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
  1111. /* Call the function which refreshes the intr coalescing timer
  1112. * value based on the traffic.
  1113. */
  1114. enic_calc_int_moderation(enic, &enic->rq[0]);
  1115. if (rq_work_done < rq_work_to_do) {
  1116. /* Some work done, but not enough to stay in polling,
  1117. * exit polling
  1118. */
  1119. napi_complete(napi);
  1120. if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
  1121. enic_set_int_moderation(enic, &enic->rq[0]);
  1122. vnic_intr_unmask(&enic->intr[intr]);
  1123. }
  1124. return rq_work_done;
  1125. }
  1126. #ifdef CONFIG_RFS_ACCEL
  1127. static void enic_free_rx_cpu_rmap(struct enic *enic)
  1128. {
  1129. free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
  1130. enic->netdev->rx_cpu_rmap = NULL;
  1131. }
  1132. static void enic_set_rx_cpu_rmap(struct enic *enic)
  1133. {
  1134. int i, res;
  1135. if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
  1136. enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
  1137. if (unlikely(!enic->netdev->rx_cpu_rmap))
  1138. return;
  1139. for (i = 0; i < enic->rq_count; i++) {
  1140. res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
  1141. enic->msix_entry[i].vector);
  1142. if (unlikely(res)) {
  1143. enic_free_rx_cpu_rmap(enic);
  1144. return;
  1145. }
  1146. }
  1147. }
  1148. }
  1149. #else
  1150. static void enic_free_rx_cpu_rmap(struct enic *enic)
  1151. {
  1152. }
  1153. static void enic_set_rx_cpu_rmap(struct enic *enic)
  1154. {
  1155. }
  1156. #endif /* CONFIG_RFS_ACCEL */
  1157. #ifdef CONFIG_NET_RX_BUSY_POLL
  1158. static int enic_busy_poll(struct napi_struct *napi)
  1159. {
  1160. struct net_device *netdev = napi->dev;
  1161. struct enic *enic = netdev_priv(netdev);
  1162. unsigned int rq = (napi - &enic->napi[0]);
  1163. unsigned int cq = enic_cq_rq(enic, rq);
  1164. unsigned int intr = enic_msix_rq_intr(enic, rq);
  1165. unsigned int work_to_do = -1; /* clean all pkts possible */
  1166. unsigned int work_done;
  1167. if (!enic_poll_lock_poll(&enic->rq[rq]))
  1168. return LL_FLUSH_BUSY;
  1169. work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
  1170. enic_rq_service, NULL);
  1171. if (work_done > 0)
  1172. vnic_intr_return_credits(&enic->intr[intr],
  1173. work_done, 0, 0);
  1174. vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
  1175. if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
  1176. enic_calc_int_moderation(enic, &enic->rq[rq]);
  1177. enic_poll_unlock_poll(&enic->rq[rq]);
  1178. return work_done;
  1179. }
  1180. #endif /* CONFIG_NET_RX_BUSY_POLL */
  1181. static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
  1182. {
  1183. struct net_device *netdev = napi->dev;
  1184. struct enic *enic = netdev_priv(netdev);
  1185. unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
  1186. struct vnic_wq *wq = &enic->wq[wq_index];
  1187. unsigned int cq;
  1188. unsigned int intr;
  1189. unsigned int wq_work_to_do = -1; /* clean all desc possible */
  1190. unsigned int wq_work_done;
  1191. unsigned int wq_irq;
  1192. wq_irq = wq->index;
  1193. cq = enic_cq_wq(enic, wq_irq);
  1194. intr = enic_msix_wq_intr(enic, wq_irq);
  1195. wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
  1196. enic_wq_service, NULL);
  1197. vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
  1198. 0 /* don't unmask intr */,
  1199. 1 /* reset intr timer */);
  1200. if (!wq_work_done) {
  1201. napi_complete(napi);
  1202. vnic_intr_unmask(&enic->intr[intr]);
  1203. return 0;
  1204. }
  1205. return budget;
  1206. }
static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	if (!enic_poll_lock_napi(&enic->rq[rq]))
		return budget;

	/* Service RQ
	 */

	if (budget > 0)
		work_done = vnic_cq_service(&enic->cq[cq],
			work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of an
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[rq]);

	enic_poll_unlock_napi(&enic->rq[rq], napi);
	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	enic_free_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}
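
/* Request the IRQ(s) matching the current interrupt mode. For MSI-X, one
 * named vector is wired up per RQ and per WQ, plus one vector for error
 * events and one for notifications; if any request_irq() fails, the
 * vectors requested so far are released again via enic_free_intr().
 */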
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	enic_set_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-rx-%u", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			int wq = enic_cq_wq(enic, i);

			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-tx-%u", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[wq];
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
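
/* Wait for in-flight handlers on every IRQ the current interrupt mode
 * uses, so callers may safely tear down state those handlers touch.
 */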
static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static void enic_set_rx_coal_setting(struct enic *enic)
{
	unsigned int speed;
	int index = -1;
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

	/* 1. Read the link speed from fw
	 * 2. Pick the default range for the speed
	 * 3. Update it in enic->rx_coalesce_setting
	 */
	speed = vnic_dev_port_speed(enic->vdev);
	if (ENIC_LINK_SPEED_10G < speed)
		index = ENIC_LINK_40G_INDEX;
	else if (ENIC_LINK_SPEED_4G < speed)
		index = ENIC_LINK_10G_INDEX;
	else
		index = ENIC_LINK_4G_INDEX;

	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

	/* Start with the value provided by UCSM */
	for (index = 0; index < enic->rq_count; index++)
		enic->cq[index].cur_rx_coal_timeval =
				enic->config.intr_timer_usec;

	rx_coal->use_adaptive_rx_coalesce = 1;
}
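
/* Tell firmware which interrupt to use for notify events: the legacy
 * notify interrupt for INTx, the dedicated notify vector for MSI-X, or
 * none for MSI (which instead polls via the notify timer below). The
 * devcmd is issued under devcmd_lock.
 */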
static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock_bh(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
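
/* Bring-up order below matters: IRQs and the notify buffer are set up
 * before any queue is enabled, each RQ is enabled and filled before the
 * WQs are enabled, and interrupts are unmasked only after NAPI has been
 * enabled on every queue.
 */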
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}
	enic_init_affinity_hint(enic);
	enic_set_affinity_hint(enic);

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		/* enable rq before updating rq desc */
		vnic_rq_enable(&enic->rq[i]);
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_free_rq;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++) {
		enic_busy_poll_init_lock(&enic->rq[i]);
		napi_enable(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);
	enic_rfs_flw_tbl_init(enic);

	return 0;

err_out_free_rq:
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	}
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	return err;
}
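
/* Teardown mirrors enic_open() in reverse: interrupts are masked and
 * synchronized first, NAPI contexts are disabled (waiting out any
 * busy-poll user of each RQ), the queues are disabled with each disable
 * confirmed, and only then are the rings, CQs and interrupts cleaned.
 */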
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}
	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);
	enic_rfs_flw_tbl_free(enic);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++) {
		napi_disable(&enic->napi[i]);
		local_bh_disable();
		while (!enic_poll_lock_napi(&enic->rq[i]))
			mdelay(1);
		local_bh_enable();
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}
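
/* Deferred MTU change, driven by the MTU the firmware reports via
 * vnic_dev_mtu(). Note that only RQ0/CQ0/INTR0 are quiesced and refilled
 * with new_mtu-sized buffers here, so this path effectively assumes the
 * single-RQ configuration.
 */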
static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[enic_cq_wq(enic, i)]);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
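
/* Generic helper for two-phase firmware commands: kick the operation off
 * with start(), then poll finished() roughly every 100ms for up to two
 * seconds. Sleeps, so it must run in process context (enforced by the
 * BUG_ON below).
 */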
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
		vnic_dev_soft_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}
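
/* Program the RSS hash key into the device: stage enic->rss_key in a
 * DMA-coherent buffer, split across ENIC_RSS_BYTES_PER_KEY-sized key
 * chunks, and hand it to firmware under devcmd_lock.
 */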
int __enic_set_rsskey(struct enic *enic)
{
	union vnic_rss_key *rss_key_buf_va;
	dma_addr_t rss_key_buf_pa;
	int i, kidx, bidx, err;

	rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
					       sizeof(union vnic_rss_key),
					       &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_LEN; i++) {
		kidx = i / ENIC_RSS_BYTES_PER_KEY;
		bidx = i % ENIC_RSS_BYTES_PER_KEY;
		rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
	}
	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
			    rss_key_buf_va, rss_key_buf_pa);

	return err;
}

static int enic_set_rsskey(struct enic *enic)
{
	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}
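
/* Reset worker: soft-reset the vNIC and rebuild its configuration through
 * a full stop/open cycle, under both rtnl and enic_api_lock so the cycle
 * is atomic with respect to enic API users.
 */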
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();
	spin_lock(&enic->enic_api_lock);
	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
	rtnl_unlock();
}

static void enic_tx_hang_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, tx_hang_reset);

	rtnl_lock();
	spin_lock(&enic->enic_api_lock);
	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
	rtnl_unlock();
}
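
/* Pick the interrupt mode. Firmware can restrict the choice through
 * config.intr_mode: MSI-X is attempted only when intr_mode < 1, MSI when
 * < 2, and INTx when < 3. Each branch also trims the rq/wq/cq/intr counts
 * down to what the chosen mode supports.
 */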
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  n + m + 2, n + m + 2) > 0) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  1 + m + 2, 1 + m + 2) > 0) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
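
/* Two ndo tables: enic_probe() installs enic_netdev_dynamic_ops for
 * dynamic and SR-IOV VF vNICs (whose MAC changes go through
 * enic_set_mac_address_dynamic) and enic_netdev_ops for everything else;
 * the remaining callbacks are identical.
 */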
static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= enic_busy_poll,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= enic_busy_poll,
#endif
};

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++) {
		napi_hash_del(&enic->napi[i]);
		netif_napi_del(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
	enic_free_affinity_hint(enic);
}

static void enic_kdump_kernel_config(struct enic *enic)
{
	if (is_kdump_kernel()) {
		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
	}
}
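
/* One-time device initialization: query the interrupt coalesce timer info
 * and vNIC config, size resources (minimally under kdump), choose an
 * interrupt mode, allocate and init vNIC resources, configure RSS, and
 * register the NAPI handlers that match the chosen interrupt mode.
 */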
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* modify resource count if we are in kdump_kernel
	 */

	enic_kdump_kernel_config(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			netif_napi_add(netdev, &enic->napi[i],
				       enic_poll_msix_rq, NAPI_POLL_WEIGHT);
		}
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
				       enic_poll_msix_wq, NAPI_POLL_WEIGHT);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_free_affinity_hint(enic);
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
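
/* PCI probe: allocate the net_device, set up DMA masks, map the BARs and
 * register the vNIC, optionally enable SR-IOV, open the device, run
 * enic_dev_init() and finally register_netdev(). The err_out_* labels at
 * the bottom unwind these steps in reverse order.
 */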
static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 47-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 47);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = vnic_devcmd_init(enic->vdev);
	if (err)
		goto err_out_vnic_unregister;

#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	enic_set_rx_coal_setting(enic);
	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* rx coalesce time already got initialized. This gets used
	 * if adaptive coal is turned off
	 */
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features;

#ifdef CONFIG_RFS_ACCEL
	netdev->hw_features |= NETIF_F_NTUPLE;
#endif

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}
	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
#endif
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}
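
/* PCI remove: cancel the deferred work items before unregistering the
 * netdev, then unwind everything enic_probe() set up, in reverse order.
 */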
static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);