qede_main.c 102 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918
  1. /* QLogic qede NIC Driver
  2. * Copyright (c) 2015 QLogic Corporation
  3. *
  4. * This software is available under the terms of the GNU General Public License
  5. * (GPL) Version 2, available from the file COPYING in the main directory of
  6. * this source tree.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/pci.h>
  10. #include <linux/version.h>
  11. #include <linux/device.h>
  12. #include <linux/netdevice.h>
  13. #include <linux/etherdevice.h>
  14. #include <linux/skbuff.h>
  15. #include <linux/errno.h>
  16. #include <linux/list.h>
  17. #include <linux/string.h>
  18. #include <linux/dma-mapping.h>
  19. #include <linux/interrupt.h>
  20. #include <asm/byteorder.h>
  21. #include <asm/param.h>
  22. #include <linux/io.h>
  23. #include <linux/netdev_features.h>
  24. #include <linux/udp.h>
  25. #include <linux/tcp.h>
  26. #include <net/udp_tunnel.h>
  27. #include <linux/ip.h>
  28. #include <net/ipv6.h>
  29. #include <net/tcp.h>
  30. #include <linux/if_ether.h>
  31. #include <linux/if_vlan.h>
  32. #include <linux/pkt_sched.h>
  33. #include <linux/ethtool.h>
  34. #include <linux/in.h>
  35. #include <linux/random.h>
  36. #include <net/ip6_checksum.h>
  37. #include <linux/bitops.h>
  38. #include <linux/qed/qede_roce.h>
  39. #include "qede.h"
  40. static char version[] =
  41. "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
  42. MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
  43. MODULE_LICENSE("GPL");
  44. MODULE_VERSION(DRV_MODULE_VERSION);
  45. static uint debug;
  46. module_param(debug, uint, 0);
  47. MODULE_PARM_DESC(debug, " Default debug msglevel");
  48. static const struct qed_eth_ops *qed_ops;
  49. #define CHIP_NUM_57980S_40 0x1634
  50. #define CHIP_NUM_57980S_10 0x1666
  51. #define CHIP_NUM_57980S_MF 0x1636
  52. #define CHIP_NUM_57980S_100 0x1644
  53. #define CHIP_NUM_57980S_50 0x1654
  54. #define CHIP_NUM_57980S_25 0x1656
  55. #define CHIP_NUM_57980S_IOV 0x1664
  56. #ifndef PCI_DEVICE_ID_NX2_57980E
  57. #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
  58. #define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
  59. #define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
  60. #define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
  61. #define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
  62. #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
  63. #define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
  64. #endif
  65. enum qede_pci_private {
  66. QEDE_PRIVATE_PF,
  67. QEDE_PRIVATE_VF
  68. };
  69. static const struct pci_device_id qede_pci_tbl[] = {
  70. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
  71. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
  72. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
  73. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
  74. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
  75. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
  76. #ifdef CONFIG_QED_SRIOV
  77. {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
  78. #endif
  79. { 0 }
  80. };
  81. MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
  82. static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  83. #define TX_TIMEOUT (5 * HZ)
  84. static void qede_remove(struct pci_dev *pdev);
  85. static int qede_alloc_rx_buffer(struct qede_dev *edev,
  86. struct qede_rx_queue *rxq);
  87. static void qede_link_update(void *dev, struct qed_link_output *link);
  88. #ifdef CONFIG_QED_SRIOV
  89. static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
  90. __be16 vlan_proto)
  91. {
  92. struct qede_dev *edev = netdev_priv(ndev);
  93. if (vlan > 4095) {
  94. DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
  95. return -EINVAL;
  96. }
  97. if (vlan_proto != htons(ETH_P_8021Q))
  98. return -EPROTONOSUPPORT;
  99. DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
  100. vlan, vf);
  101. return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
  102. }
  103. static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
  104. {
  105. struct qede_dev *edev = netdev_priv(ndev);
  106. DP_VERBOSE(edev, QED_MSG_IOV,
  107. "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
  108. mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
  109. if (!is_valid_ether_addr(mac)) {
  110. DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
  111. return -EINVAL;
  112. }
  113. return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
  114. }
  115. static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
  116. {
  117. struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
  118. struct qed_dev_info *qed_info = &edev->dev_info.common;
  119. int rc;
  120. DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
  121. rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
  122. /* Enable/Disable Tx switching for PF */
  123. if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
  124. qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
  125. struct qed_update_vport_params params;
  126. memset(&params, 0, sizeof(params));
  127. params.vport_id = 0;
  128. params.update_tx_switching_flg = 1;
  129. params.tx_switching_flg = num_vfs_param ? 1 : 0;
  130. edev->ops->vport_update(edev->cdev, &params);
  131. }
  132. return rc;
  133. }
  134. #endif
  135. static struct pci_driver qede_pci_driver = {
  136. .name = "qede",
  137. .id_table = qede_pci_tbl,
  138. .probe = qede_probe,
  139. .remove = qede_remove,
  140. #ifdef CONFIG_QED_SRIOV
  141. .sriov_configure = qede_sriov_configure,
  142. #endif
  143. };
  144. static void qede_force_mac(void *dev, u8 *mac)
  145. {
  146. struct qede_dev *edev = dev;
  147. ether_addr_copy(edev->ndev->dev_addr, mac);
  148. ether_addr_copy(edev->primary_mac, mac);
  149. }
  150. static struct qed_eth_cb_ops qede_ll_ops = {
  151. {
  152. .link_update = qede_link_update,
  153. },
  154. .force_mac = qede_force_mac,
  155. };
  156. static int qede_netdev_event(struct notifier_block *this, unsigned long event,
  157. void *ptr)
  158. {
  159. struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
  160. struct ethtool_drvinfo drvinfo;
  161. struct qede_dev *edev;
  162. if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
  163. goto done;
  164. /* Check whether this is a qede device */
  165. if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
  166. goto done;
  167. memset(&drvinfo, 0, sizeof(drvinfo));
  168. ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
  169. if (strcmp(drvinfo.driver, "qede"))
  170. goto done;
  171. edev = netdev_priv(ndev);
  172. switch (event) {
  173. case NETDEV_CHANGENAME:
  174. /* Notify qed of the name change */
  175. if (!edev->ops || !edev->ops->common)
  176. goto done;
  177. edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
  178. break;
  179. case NETDEV_CHANGEADDR:
  180. edev = netdev_priv(ndev);
  181. qede_roce_event_changeaddr(edev);
  182. break;
  183. }
  184. done:
  185. return NOTIFY_DONE;
  186. }
  187. static struct notifier_block qede_netdev_notifier = {
  188. .notifier_call = qede_netdev_event,
  189. };
  190. static
  191. int __init qede_init(void)
  192. {
  193. int ret;
  194. pr_info("qede_init: %s\n", version);
  195. qed_ops = qed_get_eth_ops();
  196. if (!qed_ops) {
  197. pr_notice("Failed to get qed ethtool operations\n");
  198. return -EINVAL;
  199. }
  200. /* Must register notifier before pci ops, since we might miss
  201. * interface rename after pci probe and netdev registeration.
  202. */
  203. ret = register_netdevice_notifier(&qede_netdev_notifier);
  204. if (ret) {
  205. pr_notice("Failed to register netdevice_notifier\n");
  206. qed_put_eth_ops();
  207. return -EINVAL;
  208. }
  209. ret = pci_register_driver(&qede_pci_driver);
  210. if (ret) {
  211. pr_notice("Failed to register driver\n");
  212. unregister_netdevice_notifier(&qede_netdev_notifier);
  213. qed_put_eth_ops();
  214. return -EINVAL;
  215. }
  216. return 0;
  217. }
  218. static void __exit qede_cleanup(void)
  219. {
  220. if (debug & QED_LOG_INFO_MASK)
  221. pr_info("qede_cleanup called\n");
  222. unregister_netdevice_notifier(&qede_netdev_notifier);
  223. pci_unregister_driver(&qede_pci_driver);
  224. qed_put_eth_ops();
  225. }
  226. module_init(qede_init);
  227. module_exit(qede_cleanup);
  228. /* -------------------------------------------------------------------------
  229. * START OF FAST-PATH
  230. * -------------------------------------------------------------------------
  231. */
  232. /* Unmap the data and free skb */
  233. static int qede_free_tx_pkt(struct qede_dev *edev,
  234. struct qede_tx_queue *txq, int *len)
  235. {
  236. u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
  237. struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
  238. struct eth_tx_1st_bd *first_bd;
  239. struct eth_tx_bd *tx_data_bd;
  240. int bds_consumed = 0;
  241. int nbds;
  242. bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
  243. int i, split_bd_len = 0;
  244. if (unlikely(!skb)) {
  245. DP_ERR(edev,
  246. "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
  247. idx, txq->sw_tx_cons, txq->sw_tx_prod);
  248. return -1;
  249. }
  250. *len = skb->len;
  251. first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
  252. bds_consumed++;
  253. nbds = first_bd->data.nbds;
  254. if (data_split) {
  255. struct eth_tx_bd *split = (struct eth_tx_bd *)
  256. qed_chain_consume(&txq->tx_pbl);
  257. split_bd_len = BD_UNMAP_LEN(split);
  258. bds_consumed++;
  259. }
  260. dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
  261. BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
  262. /* Unmap the data of the skb frags */
  263. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
  264. tx_data_bd = (struct eth_tx_bd *)
  265. qed_chain_consume(&txq->tx_pbl);
  266. dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
  267. BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
  268. }
  269. while (bds_consumed++ < nbds)
  270. qed_chain_consume(&txq->tx_pbl);
  271. /* Free skb */
  272. dev_kfree_skb_any(skb);
  273. txq->sw_tx_ring[idx].skb = NULL;
  274. txq->sw_tx_ring[idx].flags = 0;
  275. return 0;
  276. }
  277. /* Unmap the data and free skb when mapping failed during start_xmit */
  278. static void qede_free_failed_tx_pkt(struct qede_dev *edev,
  279. struct qede_tx_queue *txq,
  280. struct eth_tx_1st_bd *first_bd,
  281. int nbd, bool data_split)
  282. {
  283. u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
  284. struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
  285. struct eth_tx_bd *tx_data_bd;
  286. int i, split_bd_len = 0;
  287. /* Return prod to its position before this skb was handled */
  288. qed_chain_set_prod(&txq->tx_pbl,
  289. le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
  290. first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
  291. if (data_split) {
  292. struct eth_tx_bd *split = (struct eth_tx_bd *)
  293. qed_chain_produce(&txq->tx_pbl);
  294. split_bd_len = BD_UNMAP_LEN(split);
  295. nbd--;
  296. }
  297. dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
  298. BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
  299. /* Unmap the data of the skb frags */
  300. for (i = 0; i < nbd; i++) {
  301. tx_data_bd = (struct eth_tx_bd *)
  302. qed_chain_produce(&txq->tx_pbl);
  303. if (tx_data_bd->nbytes)
  304. dma_unmap_page(&edev->pdev->dev,
  305. BD_UNMAP_ADDR(tx_data_bd),
  306. BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
  307. }
  308. /* Return again prod to its position before this skb was handled */
  309. qed_chain_set_prod(&txq->tx_pbl,
  310. le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
  311. /* Free skb */
  312. dev_kfree_skb_any(skb);
  313. txq->sw_tx_ring[idx].skb = NULL;
  314. txq->sw_tx_ring[idx].flags = 0;
  315. }
  316. static u32 qede_xmit_type(struct qede_dev *edev,
  317. struct sk_buff *skb, int *ipv6_ext)
  318. {
  319. u32 rc = XMIT_L4_CSUM;
  320. __be16 l3_proto;
  321. if (skb->ip_summed != CHECKSUM_PARTIAL)
  322. return XMIT_PLAIN;
  323. l3_proto = vlan_get_protocol(skb);
  324. if (l3_proto == htons(ETH_P_IPV6) &&
  325. (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
  326. *ipv6_ext = 1;
  327. if (skb->encapsulation)
  328. rc |= XMIT_ENC;
  329. if (skb_is_gso(skb))
  330. rc |= XMIT_LSO;
  331. return rc;
  332. }
  333. static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
  334. struct eth_tx_2nd_bd *second_bd,
  335. struct eth_tx_3rd_bd *third_bd)
  336. {
  337. u8 l4_proto;
  338. u16 bd2_bits1 = 0, bd2_bits2 = 0;
  339. bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
  340. bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
  341. ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
  342. << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
  343. bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
  344. ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
  345. if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
  346. l4_proto = ipv6_hdr(skb)->nexthdr;
  347. else
  348. l4_proto = ip_hdr(skb)->protocol;
  349. if (l4_proto == IPPROTO_UDP)
  350. bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
  351. if (third_bd)
  352. third_bd->data.bitfields |=
  353. cpu_to_le16(((tcp_hdrlen(skb) / 4) &
  354. ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
  355. ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
  356. second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
  357. second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
  358. }
  359. static int map_frag_to_bd(struct qede_dev *edev,
  360. skb_frag_t *frag, struct eth_tx_bd *bd)
  361. {
  362. dma_addr_t mapping;
  363. /* Map skb non-linear frag data for DMA */
  364. mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
  365. skb_frag_size(frag), DMA_TO_DEVICE);
  366. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  367. DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
  368. return -ENOMEM;
  369. }
  370. /* Setup the data pointer of the frag data */
  371. BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
  372. return 0;
  373. }
  374. static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
  375. {
  376. if (is_encap_pkt)
  377. return (skb_inner_transport_header(skb) +
  378. inner_tcp_hdrlen(skb) - skb->data);
  379. else
  380. return (skb_transport_header(skb) +
  381. tcp_hdrlen(skb) - skb->data);
  382. }
  383. /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
  384. #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
  385. static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
  386. u8 xmit_type)
  387. {
  388. int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
  389. if (xmit_type & XMIT_LSO) {
  390. int hlen;
  391. hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
  392. /* linear payload would require its own BD */
  393. if (skb_headlen(skb) > hlen)
  394. allowed_frags--;
  395. }
  396. return (skb_shinfo(skb)->nr_frags > allowed_frags);
  397. }
  398. #endif
  399. static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
  400. {
  401. /* wmb makes sure that the BDs data is updated before updating the
  402. * producer, otherwise FW may read old data from the BDs.
  403. */
  404. wmb();
  405. barrier();
  406. writel(txq->tx_db.raw, txq->doorbell_addr);
  407. /* mmiowb is needed to synchronize doorbell writes from more than one
  408. * processor. It guarantees that the write arrives to the device before
  409. * the queue lock is released and another start_xmit is called (possibly
  410. * on another CPU). Without this barrier, the next doorbell can bypass
  411. * this doorbell. This is applicable to IA64/Altix systems.
  412. */
  413. mmiowb();
  414. }
  415. /* Main transmit function */
  416. static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
  417. struct net_device *ndev)
  418. {
  419. struct qede_dev *edev = netdev_priv(ndev);
  420. struct netdev_queue *netdev_txq;
  421. struct qede_tx_queue *txq;
  422. struct eth_tx_1st_bd *first_bd;
  423. struct eth_tx_2nd_bd *second_bd = NULL;
  424. struct eth_tx_3rd_bd *third_bd = NULL;
  425. struct eth_tx_bd *tx_data_bd = NULL;
  426. u16 txq_index;
  427. u8 nbd = 0;
  428. dma_addr_t mapping;
  429. int rc, frag_idx = 0, ipv6_ext = 0;
  430. u8 xmit_type;
  431. u16 idx;
  432. u16 hlen;
  433. bool data_split = false;
  434. /* Get tx-queue context and netdev index */
  435. txq_index = skb_get_queue_mapping(skb);
  436. WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
  437. txq = QEDE_TX_QUEUE(edev, txq_index);
  438. netdev_txq = netdev_get_tx_queue(ndev, txq_index);
  439. WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
  440. xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
  441. #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
  442. if (qede_pkt_req_lin(edev, skb, xmit_type)) {
  443. if (skb_linearize(skb)) {
  444. DP_NOTICE(edev,
  445. "SKB linearization failed - silently dropping this SKB\n");
  446. dev_kfree_skb_any(skb);
  447. return NETDEV_TX_OK;
  448. }
  449. }
  450. #endif
  451. /* Fill the entry in the SW ring and the BDs in the FW ring */
  452. idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
  453. txq->sw_tx_ring[idx].skb = skb;
  454. first_bd = (struct eth_tx_1st_bd *)
  455. qed_chain_produce(&txq->tx_pbl);
  456. memset(first_bd, 0, sizeof(*first_bd));
  457. first_bd->data.bd_flags.bitfields =
  458. 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
  459. /* Map skb linear data for DMA and set in the first BD */
  460. mapping = dma_map_single(&edev->pdev->dev, skb->data,
  461. skb_headlen(skb), DMA_TO_DEVICE);
  462. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  463. DP_NOTICE(edev, "SKB mapping failed\n");
  464. qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
  465. qede_update_tx_producer(txq);
  466. return NETDEV_TX_OK;
  467. }
  468. nbd++;
  469. BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
  470. /* In case there is IPv6 with extension headers or LSO we need 2nd and
  471. * 3rd BDs.
  472. */
  473. if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
  474. second_bd = (struct eth_tx_2nd_bd *)
  475. qed_chain_produce(&txq->tx_pbl);
  476. memset(second_bd, 0, sizeof(*second_bd));
  477. nbd++;
  478. third_bd = (struct eth_tx_3rd_bd *)
  479. qed_chain_produce(&txq->tx_pbl);
  480. memset(third_bd, 0, sizeof(*third_bd));
  481. nbd++;
  482. /* We need to fill in additional data in second_bd... */
  483. tx_data_bd = (struct eth_tx_bd *)second_bd;
  484. }
  485. if (skb_vlan_tag_present(skb)) {
  486. first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
  487. first_bd->data.bd_flags.bitfields |=
  488. 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
  489. }
  490. /* Fill the parsing flags & params according to the requested offload */
  491. if (xmit_type & XMIT_L4_CSUM) {
  492. /* We don't re-calculate IP checksum as it is already done by
  493. * the upper stack
  494. */
  495. first_bd->data.bd_flags.bitfields |=
  496. 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
  497. if (xmit_type & XMIT_ENC) {
  498. first_bd->data.bd_flags.bitfields |=
  499. 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
  500. first_bd->data.bitfields |=
  501. 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
  502. }
  503. /* Legacy FW had flipped behavior in regard to this bit -
  504. * I.e., needed to set to prevent FW from touching encapsulated
  505. * packets when it didn't need to.
  506. */
  507. if (unlikely(txq->is_legacy))
  508. first_bd->data.bitfields ^=
  509. 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
  510. /* If the packet is IPv6 with extension header, indicate that
  511. * to FW and pass few params, since the device cracker doesn't
  512. * support parsing IPv6 with extension header/s.
  513. */
  514. if (unlikely(ipv6_ext))
  515. qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
  516. }
  517. if (xmit_type & XMIT_LSO) {
  518. first_bd->data.bd_flags.bitfields |=
  519. (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
  520. third_bd->data.lso_mss =
  521. cpu_to_le16(skb_shinfo(skb)->gso_size);
  522. if (unlikely(xmit_type & XMIT_ENC)) {
  523. first_bd->data.bd_flags.bitfields |=
  524. 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
  525. hlen = qede_get_skb_hlen(skb, true);
  526. } else {
  527. first_bd->data.bd_flags.bitfields |=
  528. 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
  529. hlen = qede_get_skb_hlen(skb, false);
  530. }
  531. /* @@@TBD - if will not be removed need to check */
  532. third_bd->data.bitfields |=
  533. cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
  534. /* Make life easier for FW guys who can't deal with header and
  535. * data on same BD. If we need to split, use the second bd...
  536. */
  537. if (unlikely(skb_headlen(skb) > hlen)) {
  538. DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
  539. "TSO split header size is %d (%x:%x)\n",
  540. first_bd->nbytes, first_bd->addr.hi,
  541. first_bd->addr.lo);
  542. mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
  543. le32_to_cpu(first_bd->addr.lo)) +
  544. hlen;
  545. BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
  546. le16_to_cpu(first_bd->nbytes) -
  547. hlen);
  548. /* this marks the BD as one that has no
  549. * individual mapping
  550. */
  551. txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
  552. first_bd->nbytes = cpu_to_le16(hlen);
  553. tx_data_bd = (struct eth_tx_bd *)third_bd;
  554. data_split = true;
  555. }
  556. } else {
  557. first_bd->data.bitfields |=
  558. (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
  559. ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
  560. }
  561. /* Handle fragmented skb */
  562. /* special handle for frags inside 2nd and 3rd bds.. */
  563. while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
  564. rc = map_frag_to_bd(edev,
  565. &skb_shinfo(skb)->frags[frag_idx],
  566. tx_data_bd);
  567. if (rc) {
  568. qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
  569. data_split);
  570. qede_update_tx_producer(txq);
  571. return NETDEV_TX_OK;
  572. }
  573. if (tx_data_bd == (struct eth_tx_bd *)second_bd)
  574. tx_data_bd = (struct eth_tx_bd *)third_bd;
  575. else
  576. tx_data_bd = NULL;
  577. frag_idx++;
  578. }
  579. /* map last frags into 4th, 5th .... */
  580. for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
  581. tx_data_bd = (struct eth_tx_bd *)
  582. qed_chain_produce(&txq->tx_pbl);
  583. memset(tx_data_bd, 0, sizeof(*tx_data_bd));
  584. rc = map_frag_to_bd(edev,
  585. &skb_shinfo(skb)->frags[frag_idx],
  586. tx_data_bd);
  587. if (rc) {
  588. qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
  589. data_split);
  590. qede_update_tx_producer(txq);
  591. return NETDEV_TX_OK;
  592. }
  593. }
  594. /* update the first BD with the actual num BDs */
  595. first_bd->data.nbds = nbd;
  596. netdev_tx_sent_queue(netdev_txq, skb->len);
  597. skb_tx_timestamp(skb);
  598. /* Advance packet producer only before sending the packet since mapping
  599. * of pages may fail.
  600. */
  601. txq->sw_tx_prod++;
  602. /* 'next page' entries are counted in the producer value */
  603. txq->tx_db.data.bd_prod =
  604. cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
  605. if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
  606. qede_update_tx_producer(txq);
  607. if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
  608. < (MAX_SKB_FRAGS + 1))) {
  609. if (skb->xmit_more)
  610. qede_update_tx_producer(txq);
  611. netif_tx_stop_queue(netdev_txq);
  612. txq->stopped_cnt++;
  613. DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
  614. "Stop queue was called\n");
  615. /* paired memory barrier is in qede_tx_int(), we have to keep
  616. * ordering of set_bit() in netif_tx_stop_queue() and read of
  617. * fp->bd_tx_cons
  618. */
  619. smp_mb();
  620. if (qed_chain_get_elem_left(&txq->tx_pbl)
  621. >= (MAX_SKB_FRAGS + 1) &&
  622. (edev->state == QEDE_STATE_OPEN)) {
  623. netif_tx_wake_queue(netdev_txq);
  624. DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
  625. "Wake queue was called\n");
  626. }
  627. }
  628. return NETDEV_TX_OK;
  629. }
  630. int qede_txq_has_work(struct qede_tx_queue *txq)
  631. {
  632. u16 hw_bd_cons;
  633. /* Tell compiler that consumer and producer can change */
  634. barrier();
  635. hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
  636. if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
  637. return 0;
  638. return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
  639. }
  640. static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
  641. {
  642. struct netdev_queue *netdev_txq;
  643. u16 hw_bd_cons;
  644. unsigned int pkts_compl = 0, bytes_compl = 0;
  645. int rc;
  646. netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
  647. hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
  648. barrier();
  649. while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
  650. int len = 0;
  651. rc = qede_free_tx_pkt(edev, txq, &len);
  652. if (rc) {
  653. DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
  654. hw_bd_cons,
  655. qed_chain_get_cons_idx(&txq->tx_pbl));
  656. break;
  657. }
  658. bytes_compl += len;
  659. pkts_compl++;
  660. txq->sw_tx_cons++;
  661. txq->xmit_pkts++;
  662. }
  663. netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
  664. /* Need to make the tx_bd_cons update visible to start_xmit()
  665. * before checking for netif_tx_queue_stopped(). Without the
  666. * memory barrier, there is a small possibility that
  667. * start_xmit() will miss it and cause the queue to be stopped
  668. * forever.
  669. * On the other hand we need an rmb() here to ensure the proper
  670. * ordering of bit testing in the following
  671. * netif_tx_queue_stopped(txq) call.
  672. */
  673. smp_mb();
  674. if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
  675. /* Taking tx_lock is needed to prevent reenabling the queue
  676. * while it's empty. This could have happen if rx_action() gets
  677. * suspended in qede_tx_int() after the condition before
  678. * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
  679. *
  680. * stops the queue->sees fresh tx_bd_cons->releases the queue->
  681. * sends some packets consuming the whole queue again->
  682. * stops the queue
  683. */
  684. __netif_tx_lock(netdev_txq, smp_processor_id());
  685. if ((netif_tx_queue_stopped(netdev_txq)) &&
  686. (edev->state == QEDE_STATE_OPEN) &&
  687. (qed_chain_get_elem_left(&txq->tx_pbl)
  688. >= (MAX_SKB_FRAGS + 1))) {
  689. netif_tx_wake_queue(netdev_txq);
  690. DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
  691. "Wake queue was called\n");
  692. }
  693. __netif_tx_unlock(netdev_txq);
  694. }
  695. return 0;
  696. }
  697. bool qede_has_rx_work(struct qede_rx_queue *rxq)
  698. {
  699. u16 hw_comp_cons, sw_comp_cons;
  700. /* Tell compiler that status block fields can change */
  701. barrier();
  702. hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
  703. sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
  704. return hw_comp_cons != sw_comp_cons;
  705. }
  706. static bool qede_has_tx_work(struct qede_fastpath *fp)
  707. {
  708. u8 tc;
  709. for (tc = 0; tc < fp->edev->num_tc; tc++)
  710. if (qede_txq_has_work(&fp->txqs[tc]))
  711. return true;
  712. return false;
  713. }
  714. static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
  715. {
  716. qed_chain_consume(&rxq->rx_bd_ring);
  717. rxq->sw_rx_cons++;
  718. }
  719. /* This function reuses the buffer(from an offset) from
  720. * consumer index to producer index in the bd ring
  721. */
  722. static inline void qede_reuse_page(struct qede_dev *edev,
  723. struct qede_rx_queue *rxq,
  724. struct sw_rx_data *curr_cons)
  725. {
  726. struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
  727. struct sw_rx_data *curr_prod;
  728. dma_addr_t new_mapping;
  729. curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
  730. *curr_prod = *curr_cons;
  731. new_mapping = curr_prod->mapping + curr_prod->page_offset;
  732. rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
  733. rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
  734. rxq->sw_rx_prod++;
  735. curr_cons->data = NULL;
  736. }
  737. /* In case of allocation failures reuse buffers
  738. * from consumer index to produce buffers for firmware
  739. */
  740. void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
  741. struct qede_dev *edev, u8 count)
  742. {
  743. struct sw_rx_data *curr_cons;
  744. for (; count > 0; count--) {
  745. curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
  746. qede_reuse_page(edev, rxq, curr_cons);
  747. qede_rx_bd_ring_consume(rxq);
  748. }
  749. }
  750. static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
  751. struct qede_rx_queue *rxq,
  752. struct sw_rx_data *curr_cons)
  753. {
  754. /* Move to the next segment in the page */
  755. curr_cons->page_offset += rxq->rx_buf_seg_size;
  756. if (curr_cons->page_offset == PAGE_SIZE) {
  757. if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
  758. /* Since we failed to allocate new buffer
  759. * current buffer can be used again.
  760. */
  761. curr_cons->page_offset -= rxq->rx_buf_seg_size;
  762. return -ENOMEM;
  763. }
  764. dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
  765. PAGE_SIZE, DMA_FROM_DEVICE);
  766. } else {
  767. /* Increment refcount of the page as we don't want
  768. * network stack to take the ownership of the page
  769. * which can be recycled multiple times by the driver.
  770. */
  771. page_ref_inc(curr_cons->data);
  772. qede_reuse_page(edev, rxq, curr_cons);
  773. }
  774. return 0;
  775. }
  776. void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
  777. {
  778. u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
  779. u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
  780. struct eth_rx_prod_data rx_prods = {0};
  781. /* Update producers */
  782. rx_prods.bd_prod = cpu_to_le16(bd_prod);
  783. rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
  784. /* Make sure that the BD and SGE data is updated before updating the
  785. * producers since FW might read the BD/SGE right after the producer
  786. * is updated.
  787. */
  788. wmb();
  789. internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
  790. (u32 *)&rx_prods);
  791. /* mmiowb is needed to synchronize doorbell writes from more than one
  792. * processor. It guarantees that the write arrives to the device before
  793. * the napi lock is released and another qede_poll is called (possibly
  794. * on another CPU). Without this barrier, the next doorbell can bypass
  795. * this doorbell. This is applicable to IA64/Altix systems.
  796. */
  797. mmiowb();
  798. }
  799. static u32 qede_get_rxhash(struct qede_dev *edev,
  800. u8 bitfields,
  801. __le32 rss_hash, enum pkt_hash_types *rxhash_type)
  802. {
  803. enum rss_hash_type htype;
  804. htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
  805. if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
  806. *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
  807. (htype == RSS_HASH_TYPE_IPV6)) ?
  808. PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
  809. return le32_to_cpu(rss_hash);
  810. }
  811. *rxhash_type = PKT_HASH_TYPE_NONE;
  812. return 0;
  813. }
  814. static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
  815. {
  816. skb_checksum_none_assert(skb);
  817. if (csum_flag & QEDE_CSUM_UNNECESSARY)
  818. skb->ip_summed = CHECKSUM_UNNECESSARY;
  819. if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
  820. skb->csum_level = 1;
  821. }
  822. static inline void qede_skb_receive(struct qede_dev *edev,
  823. struct qede_fastpath *fp,
  824. struct sk_buff *skb, u16 vlan_tag)
  825. {
  826. if (vlan_tag)
  827. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
  828. napi_gro_receive(&fp->napi, skb);
  829. }
  830. static void qede_set_gro_params(struct qede_dev *edev,
  831. struct sk_buff *skb,
  832. struct eth_fast_path_rx_tpa_start_cqe *cqe)
  833. {
  834. u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
  835. if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
  836. PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
  837. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
  838. else
  839. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  840. skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
  841. cqe->header_len;
  842. }
  843. static int qede_fill_frag_skb(struct qede_dev *edev,
  844. struct qede_rx_queue *rxq,
  845. u8 tpa_agg_index, u16 len_on_bd)
  846. {
  847. struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
  848. NUM_RX_BDS_MAX];
  849. struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
  850. struct sk_buff *skb = tpa_info->skb;
  851. if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
  852. goto out;
  853. /* Add one frag and update the appropriate fields in the skb */
  854. skb_fill_page_desc(skb, tpa_info->frag_id++,
  855. current_bd->data, current_bd->page_offset,
  856. len_on_bd);
  857. if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
  858. /* Incr page ref count to reuse on allocation failure
  859. * so that it doesn't get freed while freeing SKB.
  860. */
  861. page_ref_inc(current_bd->data);
  862. goto out;
  863. }
  864. qed_chain_consume(&rxq->rx_bd_ring);
  865. rxq->sw_rx_cons++;
  866. skb->data_len += len_on_bd;
  867. skb->truesize += rxq->rx_buf_seg_size;
  868. skb->len += len_on_bd;
  869. return 0;
  870. out:
  871. tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
  872. qede_recycle_rx_bd_ring(rxq, edev, 1);
  873. return -ENOMEM;
  874. }
  875. static void qede_tpa_start(struct qede_dev *edev,
  876. struct qede_rx_queue *rxq,
  877. struct eth_fast_path_rx_tpa_start_cqe *cqe)
  878. {
  879. struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
  880. struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
  881. struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
  882. struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
  883. dma_addr_t mapping = tpa_info->replace_buf_mapping;
  884. struct sw_rx_data *sw_rx_data_cons;
  885. struct sw_rx_data *sw_rx_data_prod;
  886. enum pkt_hash_types rxhash_type;
  887. u32 rxhash;
  888. sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
  889. sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
  890. /* Use pre-allocated replacement buffer - we can't release the agg.
  891. * start until its over and we don't want to risk allocation failing
  892. * here, so re-allocate when aggregation will be over.
  893. */
  894. sw_rx_data_prod->mapping = replace_buf->mapping;
  895. sw_rx_data_prod->data = replace_buf->data;
  896. rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
  897. rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
  898. sw_rx_data_prod->page_offset = replace_buf->page_offset;
  899. rxq->sw_rx_prod++;
  900. /* move partial skb from cons to pool (don't unmap yet)
  901. * save mapping, incase we drop the packet later on.
  902. */
  903. tpa_info->start_buf = *sw_rx_data_cons;
  904. mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
  905. le32_to_cpu(rx_bd_cons->addr.lo));
  906. tpa_info->start_buf_mapping = mapping;
  907. rxq->sw_rx_cons++;
  908. /* set tpa state to start only if we are able to allocate skb
  909. * for this aggregation, otherwise mark as error and aggregation will
  910. * be dropped
  911. */
  912. tpa_info->skb = netdev_alloc_skb(edev->ndev,
  913. le16_to_cpu(cqe->len_on_first_bd));
  914. if (unlikely(!tpa_info->skb)) {
  915. DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
  916. tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
  917. goto cons_buf;
  918. }
  919. skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
  920. memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
  921. /* Start filling in the aggregation info */
  922. tpa_info->frag_id = 0;
  923. tpa_info->agg_state = QEDE_AGG_STATE_START;
  924. rxhash = qede_get_rxhash(edev, cqe->bitfields,
  925. cqe->rss_hash, &rxhash_type);
  926. skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
  927. if ((le16_to_cpu(cqe->pars_flags.flags) >>
  928. PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
  929. PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
  930. tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
  931. else
  932. tpa_info->vlan_tag = 0;
  933. /* This is needed in order to enable forwarding support */
  934. qede_set_gro_params(edev, tpa_info->skb, cqe);
  935. cons_buf: /* We still need to handle bd_len_list to consume buffers */
  936. if (likely(cqe->ext_bd_len_list[0]))
  937. qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
  938. le16_to_cpu(cqe->ext_bd_len_list[0]));
  939. if (unlikely(cqe->ext_bd_len_list[1])) {
  940. DP_ERR(edev,
  941. "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
  942. tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
  943. }
  944. }
  945. #ifdef CONFIG_INET
  946. static void qede_gro_ip_csum(struct sk_buff *skb)
  947. {
  948. const struct iphdr *iph = ip_hdr(skb);
  949. struct tcphdr *th;
  950. skb_set_transport_header(skb, sizeof(struct iphdr));
  951. th = tcp_hdr(skb);
  952. th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
  953. iph->saddr, iph->daddr, 0);
  954. tcp_gro_complete(skb);
  955. }
  956. static void qede_gro_ipv6_csum(struct sk_buff *skb)
  957. {
  958. struct ipv6hdr *iph = ipv6_hdr(skb);
  959. struct tcphdr *th;
  960. skb_set_transport_header(skb, sizeof(struct ipv6hdr));
  961. th = tcp_hdr(skb);
  962. th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
  963. &iph->saddr, &iph->daddr, 0);
  964. tcp_gro_complete(skb);
  965. }
  966. #endif
  967. static void qede_gro_receive(struct qede_dev *edev,
  968. struct qede_fastpath *fp,
  969. struct sk_buff *skb,
  970. u16 vlan_tag)
  971. {
  972. /* FW can send a single MTU sized packet from gro flow
  973. * due to aggregation timeout/last segment etc. which
  974. * is not expected to be a gro packet. If a skb has zero
  975. * frags then simply push it in the stack as non gso skb.
  976. */
  977. if (unlikely(!skb->data_len)) {
  978. skb_shinfo(skb)->gso_type = 0;
  979. skb_shinfo(skb)->gso_size = 0;
  980. goto send_skb;
  981. }
  982. #ifdef CONFIG_INET
  983. if (skb_shinfo(skb)->gso_size) {
  984. skb_set_network_header(skb, 0);
  985. switch (skb->protocol) {
  986. case htons(ETH_P_IP):
  987. qede_gro_ip_csum(skb);
  988. break;
  989. case htons(ETH_P_IPV6):
  990. qede_gro_ipv6_csum(skb);
  991. break;
  992. default:
  993. DP_ERR(edev,
  994. "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
  995. ntohs(skb->protocol));
  996. }
  997. }
  998. #endif
  999. send_skb:
  1000. skb_record_rx_queue(skb, fp->rxq->rxq_id);
  1001. qede_skb_receive(edev, fp, skb, vlan_tag);
  1002. }
  1003. static inline void qede_tpa_cont(struct qede_dev *edev,
  1004. struct qede_rx_queue *rxq,
  1005. struct eth_fast_path_rx_tpa_cont_cqe *cqe)
  1006. {
  1007. int i;
  1008. for (i = 0; cqe->len_list[i]; i++)
  1009. qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
  1010. le16_to_cpu(cqe->len_list[i]));
  1011. if (unlikely(i > 1))
  1012. DP_ERR(edev,
  1013. "Strange - TPA cont with more than a single len_list entry\n");
  1014. }
  1015. static void qede_tpa_end(struct qede_dev *edev,
  1016. struct qede_fastpath *fp,
  1017. struct eth_fast_path_rx_tpa_end_cqe *cqe)
  1018. {
  1019. struct qede_rx_queue *rxq = fp->rxq;
  1020. struct qede_agg_info *tpa_info;
  1021. struct sk_buff *skb;
  1022. int i;
  1023. tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
  1024. skb = tpa_info->skb;
  1025. for (i = 0; cqe->len_list[i]; i++)
  1026. qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
  1027. le16_to_cpu(cqe->len_list[i]));
  1028. if (unlikely(i > 1))
  1029. DP_ERR(edev,
  1030. "Strange - TPA emd with more than a single len_list entry\n");
  1031. if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
  1032. goto err;
  1033. /* Sanity */
  1034. if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
  1035. DP_ERR(edev,
  1036. "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
  1037. cqe->num_of_bds, tpa_info->frag_id);
  1038. if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
  1039. DP_ERR(edev,
  1040. "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
  1041. le16_to_cpu(cqe->total_packet_len), skb->len);
  1042. memcpy(skb->data,
  1043. page_address(tpa_info->start_buf.data) +
  1044. tpa_info->start_cqe.placement_offset +
  1045. tpa_info->start_buf.page_offset,
  1046. le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
  1047. /* Recycle [mapped] start buffer for the next replacement */
  1048. tpa_info->replace_buf = tpa_info->start_buf;
  1049. tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
  1050. /* Finalize the SKB */
  1051. skb->protocol = eth_type_trans(skb, edev->ndev);
  1052. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1053. /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
  1054. * to skb_shinfo(skb)->gso_segs
  1055. */
  1056. NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
  1057. qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
  1058. tpa_info->agg_state = QEDE_AGG_STATE_NONE;
  1059. return;
  1060. err:
  1061. /* The BD starting the aggregation is still mapped; Re-use it for
  1062. * future aggregations [as replacement buffer]
  1063. */
  1064. memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
  1065. sizeof(struct sw_rx_data));
  1066. tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
  1067. tpa_info->start_buf.data = NULL;
  1068. tpa_info->agg_state = QEDE_AGG_STATE_NONE;
  1069. dev_kfree_skb_any(tpa_info->skb);
  1070. tpa_info->skb = NULL;
  1071. }
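/* Checksum-validation helpers: decode the CQE parsing-and-error flags.
 * A checksum is reported as valid only if the FW actually calculated it
 * and no L4/IP header error bit is set; for tunnelled packets both the
 * tunnel and the inner-header flags are examined.
 */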
  1072. static bool qede_tunn_exist(u16 flag)
  1073. {
  1074. return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
  1075. PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
  1076. }
  1077. static u8 qede_check_tunn_csum(u16 flag)
  1078. {
  1079. u16 csum_flag = 0;
  1080. u8 tcsum = 0;
  1081. if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
  1082. PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
  1083. csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
  1084. PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
  1085. if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
  1086. PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
  1087. csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
  1088. PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
  1089. tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
  1090. }
  1091. csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
  1092. PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
  1093. PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
  1094. PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
  1095. if (csum_flag & flag)
  1096. return QEDE_CSUM_ERROR;
  1097. return QEDE_CSUM_UNNECESSARY | tcsum;
  1098. }
  1099. static u8 qede_check_notunn_csum(u16 flag)
  1100. {
  1101. u16 csum_flag = 0;
  1102. u8 csum = 0;
  1103. if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
  1104. PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
  1105. csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
  1106. PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
  1107. csum = QEDE_CSUM_UNNECESSARY;
  1108. }
  1109. csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
  1110. PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
  1111. if (csum_flag & flag)
  1112. return QEDE_CSUM_ERROR;
  1113. return csum;
  1114. }
  1115. static u8 qede_check_csum(u16 flag)
  1116. {
  1117. if (!qede_tunn_exist(flag))
  1118. return qede_check_notunn_csum(flag);
  1119. else
  1120. return qede_check_tunn_csum(flag);
  1121. }
  1122. static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
  1123. u16 flag)
  1124. {
  1125. u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
  1126. if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
  1127. ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
  1128. (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
  1129. PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
  1130. return true;
  1131. return false;
  1132. }
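/* Rx completion handler - consumes up to @budget CQEs from the completion
 * ring. Slow-path and TPA CQEs are dispatched to their handlers; regular
 * CQEs are either copied into a freshly allocated SKB (when the frame fits
 * edev->rx_copybreak) or attached as page fragments, with additional BDs
 * of jumbo frames appended as further frags.
 */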
  1133. static int qede_rx_int(struct qede_fastpath *fp, int budget)
  1134. {
  1135. struct qede_dev *edev = fp->edev;
  1136. struct qede_rx_queue *rxq = fp->rxq;
  1137. u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
  1138. int rx_pkt = 0;
  1139. u8 csum_flag;
  1140. hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
  1141. sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1142. /* Memory barrier to prevent the CPU from doing speculative reads of CQE
1143. * / BD in the while-loop before reading hw_comp_cons. If the CQE were
1144. * read before FW wrote it (FW writes the CQE and the SB, and only then
1145. * the CPU reads hw_comp_cons), the CPU would use a stale CQE.
1146. */
  1147. rmb();
  1148. /* Loop to complete all indicated BDs */
  1149. while (sw_comp_cons != hw_comp_cons) {
  1150. struct eth_fast_path_rx_reg_cqe *fp_cqe;
  1151. enum pkt_hash_types rxhash_type;
  1152. enum eth_rx_cqe_type cqe_type;
  1153. struct sw_rx_data *sw_rx_data;
  1154. union eth_rx_cqe *cqe;
  1155. struct sk_buff *skb;
  1156. struct page *data;
  1157. __le16 flags;
  1158. u16 len, pad;
  1159. u32 rx_hash;
  1160. /* Get the CQE from the completion ring */
  1161. cqe = (union eth_rx_cqe *)
  1162. qed_chain_consume(&rxq->rx_comp_ring);
  1163. cqe_type = cqe->fast_path_regular.type;
  1164. if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
  1165. edev->ops->eth_cqe_completion(
  1166. edev->cdev, fp->id,
  1167. (struct eth_slow_path_rx_cqe *)cqe);
  1168. goto next_cqe;
  1169. }
  1170. if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
  1171. switch (cqe_type) {
  1172. case ETH_RX_CQE_TYPE_TPA_START:
  1173. qede_tpa_start(edev, rxq,
  1174. &cqe->fast_path_tpa_start);
  1175. goto next_cqe;
  1176. case ETH_RX_CQE_TYPE_TPA_CONT:
  1177. qede_tpa_cont(edev, rxq,
  1178. &cqe->fast_path_tpa_cont);
  1179. goto next_cqe;
  1180. case ETH_RX_CQE_TYPE_TPA_END:
  1181. qede_tpa_end(edev, fp,
  1182. &cqe->fast_path_tpa_end);
  1183. goto next_rx_only;
  1184. default:
  1185. break;
  1186. }
  1187. }
  1188. /* Get the data from the SW ring */
  1189. sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
  1190. sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
  1191. data = sw_rx_data->data;
  1192. fp_cqe = &cqe->fast_path_regular;
  1193. len = le16_to_cpu(fp_cqe->len_on_first_bd);
  1194. pad = fp_cqe->placement_offset;
  1195. flags = cqe->fast_path_regular.pars_flags.flags;
  1196. /* If this is an error packet then drop it */
  1197. parse_flag = le16_to_cpu(flags);
  1198. csum_flag = qede_check_csum(parse_flag);
  1199. if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
  1200. if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
  1201. parse_flag)) {
  1202. rxq->rx_ip_frags++;
  1203. goto alloc_skb;
  1204. }
  1205. DP_NOTICE(edev,
  1206. "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
  1207. sw_comp_cons, parse_flag);
  1208. rxq->rx_hw_errors++;
  1209. qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
  1210. goto next_cqe;
  1211. }
  1212. alloc_skb:
  1213. skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
  1214. if (unlikely(!skb)) {
  1215. DP_NOTICE(edev,
  1216. "skb allocation failed, dropping incoming packet\n");
  1217. qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
  1218. rxq->rx_alloc_errors++;
  1219. goto next_cqe;
  1220. }
  1221. /* Copy data into SKB */
  1222. if (len + pad <= edev->rx_copybreak) {
  1223. memcpy(skb_put(skb, len),
  1224. page_address(data) + pad +
  1225. sw_rx_data->page_offset, len);
  1226. qede_reuse_page(edev, rxq, sw_rx_data);
  1227. } else {
  1228. struct skb_frag_struct *frag;
  1229. unsigned int pull_len;
  1230. unsigned char *va;
  1231. frag = &skb_shinfo(skb)->frags[0];
  1232. skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
  1233. pad + sw_rx_data->page_offset,
  1234. len, rxq->rx_buf_seg_size);
  1235. va = skb_frag_address(frag);
  1236. pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
  1237. /* Align the pull_len to optimize memcpy */
  1238. memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
  1239. skb_frag_size_sub(frag, pull_len);
  1240. frag->page_offset += pull_len;
  1241. skb->data_len -= pull_len;
  1242. skb->tail += pull_len;
  1243. if (unlikely(qede_realloc_rx_buffer(edev, rxq,
  1244. sw_rx_data))) {
  1245. DP_ERR(edev, "Failed to allocate rx buffer\n");
  1246. /* Incr page ref count to reuse on allocation
  1247. * failure so that it doesn't get freed while
  1248. * freeing SKB.
  1249. */
  1250. page_ref_inc(sw_rx_data->data);
  1251. rxq->rx_alloc_errors++;
  1252. qede_recycle_rx_bd_ring(rxq, edev,
  1253. fp_cqe->bd_num);
  1254. dev_kfree_skb_any(skb);
  1255. goto next_cqe;
  1256. }
  1257. }
  1258. qede_rx_bd_ring_consume(rxq);
  1259. if (fp_cqe->bd_num != 1) {
  1260. u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
  1261. u8 num_frags;
  1262. pkt_len -= len;
  1263. for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
  1264. num_frags--) {
  1265. u16 cur_size = pkt_len > rxq->rx_buf_size ?
  1266. rxq->rx_buf_size : pkt_len;
  1267. if (unlikely(!cur_size)) {
  1268. DP_ERR(edev,
  1269. "Still got %d BDs for mapping jumbo, but length became 0\n",
  1270. num_frags);
  1271. qede_recycle_rx_bd_ring(rxq, edev,
  1272. num_frags);
  1273. dev_kfree_skb_any(skb);
  1274. goto next_cqe;
  1275. }
  1276. if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
  1277. qede_recycle_rx_bd_ring(rxq, edev,
  1278. num_frags);
  1279. dev_kfree_skb_any(skb);
  1280. goto next_cqe;
  1281. }
  1282. sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
  1283. sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
  1284. qede_rx_bd_ring_consume(rxq);
  1285. dma_unmap_page(&edev->pdev->dev,
  1286. sw_rx_data->mapping,
  1287. PAGE_SIZE, DMA_FROM_DEVICE);
  1288. skb_fill_page_desc(skb,
  1289. skb_shinfo(skb)->nr_frags++,
  1290. sw_rx_data->data, 0,
  1291. cur_size);
  1292. skb->truesize += PAGE_SIZE;
  1293. skb->data_len += cur_size;
  1294. skb->len += cur_size;
  1295. pkt_len -= cur_size;
  1296. }
  1297. if (unlikely(pkt_len))
  1298. DP_ERR(edev,
  1299. "Mapped all BDs of jumbo, but still have %d bytes\n",
  1300. pkt_len);
  1301. }
  1302. skb->protocol = eth_type_trans(skb, edev->ndev);
  1303. rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
  1304. fp_cqe->rss_hash, &rxhash_type);
  1305. skb_set_hash(skb, rx_hash, rxhash_type);
  1306. qede_set_skb_csum(skb, csum_flag);
  1307. skb_record_rx_queue(skb, fp->rxq->rxq_id);
  1308. qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
  1309. next_rx_only:
  1310. rx_pkt++;
  1311. next_cqe: /* don't consume bd rx buffer */
  1312. qed_chain_recycle_consumed(&rxq->rx_comp_ring);
  1313. sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
  1314. /* CR TPA - revisit how to handle budget in TPA perhaps
  1315. * increase on "end"
  1316. */
  1317. if (rx_pkt == budget)
  1318. break;
  1319. } /* repeat while sw_comp_cons != hw_comp_cons... */
  1320. /* Update producers */
  1321. qede_update_rx_prod(edev, rxq);
  1322. rxq->rcv_pkts += rx_pkt;
  1323. return rx_pkt;
  1324. }
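/* NAPI poll routine: first complete any pending Tx work on all traffic
 * classes, then process Rx up to the NAPI budget. If less than the budget
 * was consumed, re-check the status block (see the barrier comment below)
 * before completing NAPI and re-enabling the interrupt.
 */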
  1325. static int qede_poll(struct napi_struct *napi, int budget)
  1326. {
  1327. struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
  1328. napi);
  1329. struct qede_dev *edev = fp->edev;
  1330. int rx_work_done = 0;
  1331. u8 tc;
  1332. for (tc = 0; tc < edev->num_tc; tc++)
  1333. if (likely(fp->type & QEDE_FASTPATH_TX) &&
  1334. qede_txq_has_work(&fp->txqs[tc]))
  1335. qede_tx_int(edev, &fp->txqs[tc]);
  1336. rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
  1337. qede_has_rx_work(fp->rxq)) ?
  1338. qede_rx_int(fp, budget) : 0;
  1339. if (rx_work_done < budget) {
  1340. qed_sb_update_sb_idx(fp->sb_info);
1341. /* *_has_*_work() reads the status block, so we need
1342. * to ensure that the status block indices have
1343. * actually been read (qed_sb_update_sb_idx) prior to
1344. * this check (*_has_*_work); otherwise we could write
1345. * a "newer" value of the status block to HW.
1346. *
1347. * If a DMA arrived right after qede_has_rx_work and
1348. * there were no rmb, the memory read
1349. * (qed_sb_update_sb_idx) might be postponed to just
1350. * before *_ack_sb. In that case there would never be
1351. * another interrupt until there is another update of
1352. * the status block, while there is still unhandled work.
1353. */
  1354. rmb();
  1355. /* Fall out from the NAPI loop if needed */
  1356. if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
  1357. qede_has_rx_work(fp->rxq)) ||
  1358. (likely(fp->type & QEDE_FASTPATH_TX) &&
  1359. qede_has_tx_work(fp)))) {
  1360. napi_complete(napi);
  1361. /* Update and reenable interrupts */
  1362. qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
  1363. 1 /*update*/);
  1364. } else {
  1365. rx_work_done = budget;
  1366. }
  1367. }
  1368. return rx_work_done;
  1369. }
  1370. static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
  1371. {
  1372. struct qede_fastpath *fp = fp_cookie;
  1373. qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
  1374. napi_schedule_irqoff(&fp->napi);
  1375. return IRQ_HANDLED;
  1376. }
  1377. /* -------------------------------------------------------------------------
  1378. * END OF FAST-PATH
  1379. * -------------------------------------------------------------------------
  1380. */
  1381. static int qede_open(struct net_device *ndev);
  1382. static int qede_close(struct net_device *ndev);
  1383. static int qede_set_mac_addr(struct net_device *ndev, void *p);
  1384. static void qede_set_rx_mode(struct net_device *ndev);
  1385. static void qede_config_rx_mode(struct net_device *ndev);
  1386. static int qede_set_ucast_rx_mac(struct qede_dev *edev,
  1387. enum qed_filter_xcast_params_type opcode,
  1388. unsigned char mac[ETH_ALEN])
  1389. {
  1390. struct qed_filter_params filter_cmd;
  1391. memset(&filter_cmd, 0, sizeof(filter_cmd));
  1392. filter_cmd.type = QED_FILTER_TYPE_UCAST;
  1393. filter_cmd.filter.ucast.type = opcode;
  1394. filter_cmd.filter.ucast.mac_valid = 1;
  1395. ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
  1396. return edev->ops->filter_config(edev->cdev, &filter_cmd);
  1397. }
  1398. static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
  1399. enum qed_filter_xcast_params_type opcode,
  1400. u16 vid)
  1401. {
  1402. struct qed_filter_params filter_cmd;
  1403. memset(&filter_cmd, 0, sizeof(filter_cmd));
  1404. filter_cmd.type = QED_FILTER_TYPE_UCAST;
  1405. filter_cmd.filter.ucast.type = opcode;
  1406. filter_cmd.filter.ucast.vlan_valid = 1;
  1407. filter_cmd.filter.ucast.vlan = vid;
  1408. return edev->ops->filter_config(edev->cdev, &filter_cmd);
  1409. }
  1410. void qede_fill_by_demand_stats(struct qede_dev *edev)
  1411. {
  1412. struct qed_eth_stats stats;
  1413. edev->ops->get_vport_stats(edev->cdev, &stats);
  1414. edev->stats.no_buff_discards = stats.no_buff_discards;
  1415. edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
  1416. edev->stats.ttl0_discard = stats.ttl0_discard;
  1417. edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
  1418. edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
  1419. edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
  1420. edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
  1421. edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
  1422. edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
  1423. edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
  1424. edev->stats.mac_filter_discards = stats.mac_filter_discards;
  1425. edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
  1426. edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
  1427. edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
  1428. edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
  1429. edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
  1430. edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
  1431. edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
  1432. edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
  1433. edev->stats.coalesced_events = stats.tpa_coalesced_events;
  1434. edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
  1435. edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
  1436. edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
  1437. edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
  1438. edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
  1439. edev->stats.rx_128_to_255_byte_packets =
  1440. stats.rx_128_to_255_byte_packets;
  1441. edev->stats.rx_256_to_511_byte_packets =
  1442. stats.rx_256_to_511_byte_packets;
  1443. edev->stats.rx_512_to_1023_byte_packets =
  1444. stats.rx_512_to_1023_byte_packets;
  1445. edev->stats.rx_1024_to_1518_byte_packets =
  1446. stats.rx_1024_to_1518_byte_packets;
  1447. edev->stats.rx_1519_to_1522_byte_packets =
  1448. stats.rx_1519_to_1522_byte_packets;
  1449. edev->stats.rx_1519_to_2047_byte_packets =
  1450. stats.rx_1519_to_2047_byte_packets;
  1451. edev->stats.rx_2048_to_4095_byte_packets =
  1452. stats.rx_2048_to_4095_byte_packets;
  1453. edev->stats.rx_4096_to_9216_byte_packets =
  1454. stats.rx_4096_to_9216_byte_packets;
  1455. edev->stats.rx_9217_to_16383_byte_packets =
  1456. stats.rx_9217_to_16383_byte_packets;
  1457. edev->stats.rx_crc_errors = stats.rx_crc_errors;
  1458. edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
  1459. edev->stats.rx_pause_frames = stats.rx_pause_frames;
  1460. edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
  1461. edev->stats.rx_align_errors = stats.rx_align_errors;
  1462. edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
  1463. edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
  1464. edev->stats.rx_jabbers = stats.rx_jabbers;
  1465. edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
  1466. edev->stats.rx_fragments = stats.rx_fragments;
  1467. edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
  1468. edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
  1469. edev->stats.tx_128_to_255_byte_packets =
  1470. stats.tx_128_to_255_byte_packets;
  1471. edev->stats.tx_256_to_511_byte_packets =
  1472. stats.tx_256_to_511_byte_packets;
  1473. edev->stats.tx_512_to_1023_byte_packets =
  1474. stats.tx_512_to_1023_byte_packets;
  1475. edev->stats.tx_1024_to_1518_byte_packets =
  1476. stats.tx_1024_to_1518_byte_packets;
  1477. edev->stats.tx_1519_to_2047_byte_packets =
  1478. stats.tx_1519_to_2047_byte_packets;
  1479. edev->stats.tx_2048_to_4095_byte_packets =
  1480. stats.tx_2048_to_4095_byte_packets;
  1481. edev->stats.tx_4096_to_9216_byte_packets =
  1482. stats.tx_4096_to_9216_byte_packets;
  1483. edev->stats.tx_9217_to_16383_byte_packets =
  1484. stats.tx_9217_to_16383_byte_packets;
  1485. edev->stats.tx_pause_frames = stats.tx_pause_frames;
  1486. edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
  1487. edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
  1488. edev->stats.tx_total_collisions = stats.tx_total_collisions;
  1489. edev->stats.brb_truncates = stats.brb_truncates;
  1490. edev->stats.brb_discards = stats.brb_discards;
  1491. edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
  1492. }
  1493. static
  1494. struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
  1495. struct rtnl_link_stats64 *stats)
  1496. {
  1497. struct qede_dev *edev = netdev_priv(dev);
  1498. qede_fill_by_demand_stats(edev);
  1499. stats->rx_packets = edev->stats.rx_ucast_pkts +
  1500. edev->stats.rx_mcast_pkts +
  1501. edev->stats.rx_bcast_pkts;
  1502. stats->tx_packets = edev->stats.tx_ucast_pkts +
  1503. edev->stats.tx_mcast_pkts +
  1504. edev->stats.tx_bcast_pkts;
  1505. stats->rx_bytes = edev->stats.rx_ucast_bytes +
  1506. edev->stats.rx_mcast_bytes +
  1507. edev->stats.rx_bcast_bytes;
  1508. stats->tx_bytes = edev->stats.tx_ucast_bytes +
  1509. edev->stats.tx_mcast_bytes +
  1510. edev->stats.tx_bcast_bytes;
  1511. stats->tx_errors = edev->stats.tx_err_drop_pkts;
  1512. stats->multicast = edev->stats.rx_mcast_pkts +
  1513. edev->stats.rx_bcast_pkts;
  1514. stats->rx_fifo_errors = edev->stats.no_buff_discards;
  1515. stats->collisions = edev->stats.tx_total_collisions;
  1516. stats->rx_crc_errors = edev->stats.rx_crc_errors;
  1517. stats->rx_frame_errors = edev->stats.rx_align_errors;
  1518. return stats;
  1519. }
  1520. #ifdef CONFIG_QED_SRIOV
  1521. static int qede_get_vf_config(struct net_device *dev, int vfidx,
  1522. struct ifla_vf_info *ivi)
  1523. {
  1524. struct qede_dev *edev = netdev_priv(dev);
  1525. if (!edev->ops)
  1526. return -EINVAL;
  1527. return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
  1528. }
  1529. static int qede_set_vf_rate(struct net_device *dev, int vfidx,
  1530. int min_tx_rate, int max_tx_rate)
  1531. {
  1532. struct qede_dev *edev = netdev_priv(dev);
  1533. return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
  1534. max_tx_rate);
  1535. }
  1536. static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
  1537. {
  1538. struct qede_dev *edev = netdev_priv(dev);
  1539. if (!edev->ops)
  1540. return -EINVAL;
  1541. return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
  1542. }
  1543. static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
  1544. int link_state)
  1545. {
  1546. struct qede_dev *edev = netdev_priv(dev);
  1547. if (!edev->ops)
  1548. return -EINVAL;
  1549. return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
  1550. }
  1551. #endif
  1552. static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
  1553. {
  1554. struct qed_update_vport_params params;
  1555. int rc;
  1556. /* Proceed only if action actually needs to be performed */
  1557. if (edev->accept_any_vlan == action)
  1558. return;
  1559. memset(&params, 0, sizeof(params));
  1560. params.vport_id = 0;
  1561. params.accept_any_vlan = action;
  1562. params.update_accept_any_vlan_flg = 1;
  1563. rc = edev->ops->vport_update(edev->cdev, &params);
  1564. if (rc) {
  1565. DP_ERR(edev, "Failed to %s accept-any-vlan\n",
  1566. action ? "enable" : "disable");
  1567. } else {
  1568. DP_INFO(edev, "%s accept-any-vlan\n",
  1569. action ? "enabled" : "disabled");
  1570. edev->accept_any_vlan = action;
  1571. }
  1572. }
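/* 8021q filter handling: the device exposes dev_info.num_vlan_filters HW
 * filters (vlan 0 has its own reserved filter). Each new VID consumes a
 * filter while quota remains; once it is exhausted the vport falls back to
 * accept-any-vlan mode until enough filters are released again.
 */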
  1573. static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
  1574. {
  1575. struct qede_dev *edev = netdev_priv(dev);
  1576. struct qede_vlan *vlan, *tmp;
  1577. int rc;
  1578. DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
  1579. vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
  1580. if (!vlan) {
  1581. DP_INFO(edev, "Failed to allocate struct for vlan\n");
  1582. return -ENOMEM;
  1583. }
  1584. INIT_LIST_HEAD(&vlan->list);
  1585. vlan->vid = vid;
  1586. vlan->configured = false;
  1587. /* Verify vlan isn't already configured */
  1588. list_for_each_entry(tmp, &edev->vlan_list, list) {
  1589. if (tmp->vid == vlan->vid) {
  1590. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  1591. "vlan already configured\n");
  1592. kfree(vlan);
  1593. return -EEXIST;
  1594. }
  1595. }
  1596. /* If interface is down, cache this VLAN ID and return */
  1597. if (edev->state != QEDE_STATE_OPEN) {
  1598. DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
  1599. "Interface is down, VLAN %d will be configured when interface is up\n",
  1600. vid);
  1601. if (vid != 0)
  1602. edev->non_configured_vlans++;
  1603. list_add(&vlan->list, &edev->vlan_list);
  1604. return 0;
  1605. }
  1606. /* Check for the filter limit.
  1607. * Note - vlan0 has a reserved filter and can be added without
  1608. * worrying about quota
  1609. */
  1610. if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
  1611. (vlan->vid == 0)) {
  1612. rc = qede_set_ucast_rx_vlan(edev,
  1613. QED_FILTER_XCAST_TYPE_ADD,
  1614. vlan->vid);
  1615. if (rc) {
  1616. DP_ERR(edev, "Failed to configure VLAN %d\n",
  1617. vlan->vid);
  1618. kfree(vlan);
  1619. return -EINVAL;
  1620. }
  1621. vlan->configured = true;
1622. /* vlan0 filter doesn't consume our quota */
  1623. if (vlan->vid != 0)
  1624. edev->configured_vlans++;
  1625. } else {
  1626. /* Out of quota; Activate accept-any-VLAN mode */
  1627. if (!edev->non_configured_vlans)
  1628. qede_config_accept_any_vlan(edev, true);
  1629. edev->non_configured_vlans++;
  1630. }
  1631. list_add(&vlan->list, &edev->vlan_list);
  1632. return 0;
  1633. }
  1634. static void qede_del_vlan_from_list(struct qede_dev *edev,
  1635. struct qede_vlan *vlan)
  1636. {
1637. /* vlan0 filter doesn't consume our quota */
  1638. if (vlan->vid != 0) {
  1639. if (vlan->configured)
  1640. edev->configured_vlans--;
  1641. else
  1642. edev->non_configured_vlans--;
  1643. }
  1644. list_del(&vlan->list);
  1645. kfree(vlan);
  1646. }
  1647. static int qede_configure_vlan_filters(struct qede_dev *edev)
  1648. {
  1649. int rc = 0, real_rc = 0, accept_any_vlan = 0;
  1650. struct qed_dev_eth_info *dev_info;
  1651. struct qede_vlan *vlan = NULL;
  1652. if (list_empty(&edev->vlan_list))
  1653. return 0;
  1654. dev_info = &edev->dev_info;
  1655. /* Configure non-configured vlans */
  1656. list_for_each_entry(vlan, &edev->vlan_list, list) {
  1657. if (vlan->configured)
  1658. continue;
  1659. /* We have used all our credits, now enable accept_any_vlan */
  1660. if ((vlan->vid != 0) &&
  1661. (edev->configured_vlans == dev_info->num_vlan_filters)) {
  1662. accept_any_vlan = 1;
  1663. continue;
  1664. }
  1665. DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
  1666. rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
  1667. vlan->vid);
  1668. if (rc) {
  1669. DP_ERR(edev, "Failed to configure VLAN %u\n",
  1670. vlan->vid);
  1671. real_rc = rc;
  1672. continue;
  1673. }
  1674. vlan->configured = true;
  1675. /* vlan0 filter doesn't consume our VLAN filter's quota */
  1676. if (vlan->vid != 0) {
  1677. edev->non_configured_vlans--;
  1678. edev->configured_vlans++;
  1679. }
  1680. }
  1681. /* enable accept_any_vlan mode if we have more VLANs than credits,
  1682. * or remove accept_any_vlan mode if we've actually removed
  1683. * a non-configured vlan, and all remaining vlans are truly configured.
  1684. */
  1685. if (accept_any_vlan)
  1686. qede_config_accept_any_vlan(edev, true);
  1687. else if (!edev->non_configured_vlans)
  1688. qede_config_accept_any_vlan(edev, false);
  1689. return real_rc;
  1690. }
  1691. static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
  1692. {
  1693. struct qede_dev *edev = netdev_priv(dev);
  1694. struct qede_vlan *vlan = NULL;
  1695. int rc;
  1696. DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
  1697. /* Find whether entry exists */
  1698. list_for_each_entry(vlan, &edev->vlan_list, list)
  1699. if (vlan->vid == vid)
  1700. break;
  1701. if (!vlan || (vlan->vid != vid)) {
  1702. DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
  1703. "Vlan isn't configured\n");
  1704. return 0;
  1705. }
  1706. if (edev->state != QEDE_STATE_OPEN) {
1707. /* The interface is already down, so there is no VPORT instance
1708. * from which to remove the vlan filter; just update the vlan list.
1709. */
  1710. DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
  1711. "Interface is down, removing VLAN from list only\n");
  1712. qede_del_vlan_from_list(edev, vlan);
  1713. return 0;
  1714. }
  1715. /* Remove vlan */
  1716. if (vlan->configured) {
  1717. rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
  1718. vid);
  1719. if (rc) {
  1720. DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
  1721. return -EINVAL;
  1722. }
  1723. }
  1724. qede_del_vlan_from_list(edev, vlan);
  1725. /* We have removed a VLAN - try to see if we can
  1726. * configure non-configured VLAN from the list.
  1727. */
  1728. rc = qede_configure_vlan_filters(edev);
  1729. return rc;
  1730. }
  1731. static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
  1732. {
  1733. struct qede_vlan *vlan = NULL;
  1734. if (list_empty(&edev->vlan_list))
  1735. return;
  1736. list_for_each_entry(vlan, &edev->vlan_list, list) {
  1737. if (!vlan->configured)
  1738. continue;
  1739. vlan->configured = false;
1740. /* vlan0 filter doesn't consume our quota */
  1741. if (vlan->vid != 0) {
  1742. edev->non_configured_vlans++;
  1743. edev->configured_vlans--;
  1744. }
  1745. DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
  1746. "marked vlan %d as non-configured\n", vlan->vid);
  1747. }
  1748. edev->accept_any_vlan = false;
  1749. }
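/* Toggling NETIF_F_GRO may require a reload: HW GRO (TPA) buffers are
 * allocated at load time according to the feature state, so the change
 * only takes effect by reloading the interface (qede_reload()).
 */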
  1750. static int qede_set_features(struct net_device *dev, netdev_features_t features)
  1751. {
  1752. struct qede_dev *edev = netdev_priv(dev);
  1753. netdev_features_t changes = features ^ dev->features;
  1754. bool need_reload = false;
  1755. /* No action needed if hardware GRO is disabled during driver load */
  1756. if (changes & NETIF_F_GRO) {
  1757. if (dev->features & NETIF_F_GRO)
  1758. need_reload = !edev->gro_disable;
  1759. else
  1760. need_reload = edev->gro_disable;
  1761. }
  1762. if (need_reload && netif_running(edev->ndev)) {
  1763. dev->features = features;
  1764. qede_reload(edev, NULL, NULL);
  1765. return 1;
  1766. }
  1767. return 0;
  1768. }
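/* UDP tunnel (VXLAN/GENEVE) port notifications: only a single destination
 * port of each type is tracked; the actual device configuration is
 * deferred to the slowpath task (qede_sp_task).
 */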
  1769. static void qede_udp_tunnel_add(struct net_device *dev,
  1770. struct udp_tunnel_info *ti)
  1771. {
  1772. struct qede_dev *edev = netdev_priv(dev);
  1773. u16 t_port = ntohs(ti->port);
  1774. switch (ti->type) {
  1775. case UDP_TUNNEL_TYPE_VXLAN:
  1776. if (edev->vxlan_dst_port)
  1777. return;
  1778. edev->vxlan_dst_port = t_port;
  1779. DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
  1780. t_port);
  1781. set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
  1782. break;
  1783. case UDP_TUNNEL_TYPE_GENEVE:
  1784. if (edev->geneve_dst_port)
  1785. return;
  1786. edev->geneve_dst_port = t_port;
  1787. DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
  1788. t_port);
  1789. set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
  1790. break;
  1791. default:
  1792. return;
  1793. }
  1794. schedule_delayed_work(&edev->sp_task, 0);
  1795. }
  1796. static void qede_udp_tunnel_del(struct net_device *dev,
  1797. struct udp_tunnel_info *ti)
  1798. {
  1799. struct qede_dev *edev = netdev_priv(dev);
  1800. u16 t_port = ntohs(ti->port);
  1801. switch (ti->type) {
  1802. case UDP_TUNNEL_TYPE_VXLAN:
  1803. if (t_port != edev->vxlan_dst_port)
  1804. return;
  1805. edev->vxlan_dst_port = 0;
  1806. DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
  1807. t_port);
  1808. set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
  1809. break;
  1810. case UDP_TUNNEL_TYPE_GENEVE:
  1811. if (t_port != edev->geneve_dst_port)
  1812. return;
  1813. edev->geneve_dst_port = 0;
  1814. DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
  1815. t_port);
  1816. set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
  1817. break;
  1818. default:
  1819. return;
  1820. }
  1821. schedule_delayed_work(&edev->sp_task, 0);
  1822. }
  1823. static const struct net_device_ops qede_netdev_ops = {
  1824. .ndo_open = qede_open,
  1825. .ndo_stop = qede_close,
  1826. .ndo_start_xmit = qede_start_xmit,
  1827. .ndo_set_rx_mode = qede_set_rx_mode,
  1828. .ndo_set_mac_address = qede_set_mac_addr,
  1829. .ndo_validate_addr = eth_validate_addr,
  1830. .ndo_change_mtu = qede_change_mtu,
  1831. #ifdef CONFIG_QED_SRIOV
  1832. .ndo_set_vf_mac = qede_set_vf_mac,
  1833. .ndo_set_vf_vlan = qede_set_vf_vlan,
  1834. #endif
  1835. .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
  1836. .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
  1837. .ndo_set_features = qede_set_features,
  1838. .ndo_get_stats64 = qede_get_stats64,
  1839. #ifdef CONFIG_QED_SRIOV
  1840. .ndo_set_vf_link_state = qede_set_vf_link_state,
  1841. .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
  1842. .ndo_get_vf_config = qede_get_vf_config,
  1843. .ndo_set_vf_rate = qede_set_vf_rate,
  1844. #endif
  1845. .ndo_udp_tunnel_add = qede_udp_tunnel_add,
  1846. .ndo_udp_tunnel_del = qede_udp_tunnel_del,
  1847. };
  1848. /* -------------------------------------------------------------------------
  1849. * START OF PROBE / REMOVE
  1850. * -------------------------------------------------------------------------
  1851. */
  1852. static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
  1853. struct pci_dev *pdev,
  1854. struct qed_dev_eth_info *info,
  1855. u32 dp_module, u8 dp_level)
  1856. {
  1857. struct net_device *ndev;
  1858. struct qede_dev *edev;
  1859. ndev = alloc_etherdev_mqs(sizeof(*edev),
  1860. info->num_queues, info->num_queues);
  1861. if (!ndev) {
  1862. pr_err("etherdev allocation failed\n");
  1863. return NULL;
  1864. }
  1865. edev = netdev_priv(ndev);
  1866. edev->ndev = ndev;
  1867. edev->cdev = cdev;
  1868. edev->pdev = pdev;
  1869. edev->dp_module = dp_module;
  1870. edev->dp_level = dp_level;
  1871. edev->ops = qed_ops;
  1872. edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
  1873. edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
  1874. DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
  1875. info->num_queues, info->num_queues);
  1876. SET_NETDEV_DEV(ndev, &pdev->dev);
  1877. memset(&edev->stats, 0, sizeof(edev->stats));
  1878. memcpy(&edev->dev_info, info, sizeof(*info));
  1879. edev->num_tc = edev->dev_info.num_tc;
  1880. INIT_LIST_HEAD(&edev->vlan_list);
  1881. return edev;
  1882. }
  1883. static void qede_init_ndev(struct qede_dev *edev)
  1884. {
  1885. struct net_device *ndev = edev->ndev;
  1886. struct pci_dev *pdev = edev->pdev;
  1887. u32 hw_features;
  1888. pci_set_drvdata(pdev, ndev);
  1889. ndev->mem_start = edev->dev_info.common.pci_mem_start;
  1890. ndev->base_addr = ndev->mem_start;
  1891. ndev->mem_end = edev->dev_info.common.pci_mem_end;
  1892. ndev->irq = edev->dev_info.common.pci_irq;
  1893. ndev->watchdog_timeo = TX_TIMEOUT;
  1894. ndev->netdev_ops = &qede_netdev_ops;
  1895. qede_set_ethtool_ops(ndev);
1896. /* user-changeable features */
  1897. hw_features = NETIF_F_GRO | NETIF_F_SG |
  1898. NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  1899. NETIF_F_TSO | NETIF_F_TSO6;
1900. /* Encapsulation features */
  1901. hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
  1902. NETIF_F_TSO_ECN;
  1903. ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  1904. NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
  1905. NETIF_F_TSO6 | NETIF_F_GSO_GRE |
  1906. NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
  1907. ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
  1908. NETIF_F_HIGHDMA;
  1909. ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
  1910. NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
  1911. NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
  1912. ndev->hw_features = hw_features;
  1913. /* Set network device HW mac */
  1914. ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
  1915. }
  1916. /* This function converts from 32b param to two params of level and module
  1917. * Input 32b decoding:
  1918. * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
  1919. * 'happy' flow, e.g. memory allocation failed.
  1920. * b30 - enable all INFO prints. INFO prints are for major steps in the flow
  1921. * and provide important parameters.
  1922. * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
  1923. * module. VERBOSE prints are for tracking the specific flow in low level.
  1924. *
  1925. * Notice that the level should be that of the lowest required logs.
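 *
 * Example (assuming the QED_LOG_*_MASK constants follow the bit layout
 * described above):
 *   debug=0x80000000 -> QED_LEVEL_NOTICE, dp_module=0
 *   debug=0x40000000 -> QED_LEVEL_INFO, dp_module=0
 *   debug=0x00000003 -> QED_LEVEL_VERBOSE, dp_module=0x3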
  1926. */
  1927. void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
  1928. {
  1929. *p_dp_level = QED_LEVEL_NOTICE;
  1930. *p_dp_module = 0;
  1931. if (debug & QED_LOG_VERBOSE_MASK) {
  1932. *p_dp_level = QED_LEVEL_VERBOSE;
  1933. *p_dp_module = (debug & 0x3FFFFFFF);
  1934. } else if (debug & QED_LOG_INFO_MASK) {
  1935. *p_dp_level = QED_LEVEL_INFO;
  1936. } else if (debug & QED_LOG_NOTICE_MASK) {
  1937. *p_dp_level = QED_LEVEL_NOTICE;
  1938. }
  1939. }
  1940. static void qede_free_fp_array(struct qede_dev *edev)
  1941. {
  1942. if (edev->fp_array) {
  1943. struct qede_fastpath *fp;
  1944. int i;
  1945. for_each_queue(i) {
  1946. fp = &edev->fp_array[i];
  1947. kfree(fp->sb_info);
  1948. kfree(fp->rxq);
  1949. kfree(fp->txqs);
  1950. }
  1951. kfree(edev->fp_array);
  1952. }
  1953. edev->num_queues = 0;
  1954. edev->fp_num_tx = 0;
  1955. edev->fp_num_rx = 0;
  1956. }
  1957. static int qede_alloc_fp_array(struct qede_dev *edev)
  1958. {
  1959. u8 fp_combined, fp_rx = edev->fp_num_rx;
  1960. struct qede_fastpath *fp;
  1961. int i;
  1962. edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
  1963. sizeof(*edev->fp_array), GFP_KERNEL);
  1964. if (!edev->fp_array) {
  1965. DP_NOTICE(edev, "fp array allocation failed\n");
  1966. goto err;
  1967. }
  1968. fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
  1969. /* Allocate the FP elements for Rx queues followed by combined and then
  1970. * the Tx. This ordering should be maintained so that the respective
  1971. * queues (Rx or Tx) will be together in the fastpath array and the
  1972. * associated ids will be sequential.
  1973. */
  1974. for_each_queue(i) {
  1975. fp = &edev->fp_array[i];
  1976. fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
  1977. if (!fp->sb_info) {
  1978. DP_NOTICE(edev, "sb info struct allocation failed\n");
  1979. goto err;
  1980. }
  1981. if (fp_rx) {
  1982. fp->type = QEDE_FASTPATH_RX;
  1983. fp_rx--;
  1984. } else if (fp_combined) {
  1985. fp->type = QEDE_FASTPATH_COMBINED;
  1986. fp_combined--;
  1987. } else {
  1988. fp->type = QEDE_FASTPATH_TX;
  1989. }
  1990. if (fp->type & QEDE_FASTPATH_TX) {
  1991. fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
  1992. GFP_KERNEL);
  1993. if (!fp->txqs) {
  1994. DP_NOTICE(edev,
  1995. "TXQ array allocation failed\n");
  1996. goto err;
  1997. }
  1998. }
  1999. if (fp->type & QEDE_FASTPATH_RX) {
  2000. fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
  2001. if (!fp->rxq) {
  2002. DP_NOTICE(edev,
  2003. "RXQ struct allocation failed\n");
  2004. goto err;
  2005. }
  2006. }
  2007. }
  2008. return 0;
  2009. err:
  2010. qede_free_fp_array(edev);
  2011. return -ENOMEM;
  2012. }
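/* Slowpath task - runs deferred work scheduled from fastpath/atomic
 * context under qede_lock: re-applies the Rx mode when the interface is
 * open and pushes any pending VXLAN/GENEVE destination-port updates to
 * the device.
 */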
  2013. static void qede_sp_task(struct work_struct *work)
  2014. {
  2015. struct qede_dev *edev = container_of(work, struct qede_dev,
  2016. sp_task.work);
  2017. struct qed_dev *cdev = edev->cdev;
  2018. mutex_lock(&edev->qede_lock);
  2019. if (edev->state == QEDE_STATE_OPEN) {
  2020. if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
  2021. qede_config_rx_mode(edev->ndev);
  2022. }
  2023. if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
  2024. struct qed_tunn_params tunn_params;
  2025. memset(&tunn_params, 0, sizeof(tunn_params));
  2026. tunn_params.update_vxlan_port = 1;
  2027. tunn_params.vxlan_port = edev->vxlan_dst_port;
  2028. qed_ops->tunn_config(cdev, &tunn_params);
  2029. }
  2030. if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
  2031. struct qed_tunn_params tunn_params;
  2032. memset(&tunn_params, 0, sizeof(tunn_params));
  2033. tunn_params.update_geneve_port = 1;
  2034. tunn_params.geneve_port = edev->geneve_dst_port;
  2035. qed_ops->tunn_config(cdev, &tunn_params);
  2036. }
  2037. mutex_unlock(&edev->qede_lock);
  2038. }
  2039. static void qede_update_pf_params(struct qed_dev *cdev)
  2040. {
  2041. struct qed_pf_params pf_params;
  2042. /* 64 rx + 64 tx */
  2043. memset(&pf_params, 0, sizeof(struct qed_pf_params));
  2044. pf_params.eth_pf_params.num_cons = 128;
  2045. qed_ops->common->update_pf_params(cdev, &pf_params);
  2046. }
  2047. enum qede_probe_mode {
  2048. QEDE_PROBE_NORMAL,
  2049. };
  2050. static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
  2051. bool is_vf, enum qede_probe_mode mode)
  2052. {
  2053. struct qed_probe_params probe_params;
  2054. struct qed_slowpath_params sp_params;
  2055. struct qed_dev_eth_info dev_info;
  2056. struct qede_dev *edev;
  2057. struct qed_dev *cdev;
  2058. int rc;
  2059. if (unlikely(dp_level & QED_LEVEL_INFO))
  2060. pr_notice("Starting qede probe\n");
  2061. memset(&probe_params, 0, sizeof(probe_params));
  2062. probe_params.protocol = QED_PROTOCOL_ETH;
  2063. probe_params.dp_module = dp_module;
  2064. probe_params.dp_level = dp_level;
  2065. probe_params.is_vf = is_vf;
  2066. cdev = qed_ops->common->probe(pdev, &probe_params);
  2067. if (!cdev) {
  2068. rc = -ENODEV;
  2069. goto err0;
  2070. }
  2071. qede_update_pf_params(cdev);
  2072. /* Start the Slowpath-process */
  2073. memset(&sp_params, 0, sizeof(sp_params));
  2074. sp_params.int_mode = QED_INT_MODE_MSIX;
  2075. sp_params.drv_major = QEDE_MAJOR_VERSION;
  2076. sp_params.drv_minor = QEDE_MINOR_VERSION;
  2077. sp_params.drv_rev = QEDE_REVISION_VERSION;
  2078. sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
  2079. strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
  2080. rc = qed_ops->common->slowpath_start(cdev, &sp_params);
  2081. if (rc) {
  2082. pr_notice("Cannot start slowpath\n");
  2083. goto err1;
  2084. }
  2085. /* Learn information crucial for qede to progress */
  2086. rc = qed_ops->fill_dev_info(cdev, &dev_info);
  2087. if (rc)
  2088. goto err2;
  2089. edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
  2090. dp_level);
  2091. if (!edev) {
  2092. rc = -ENOMEM;
  2093. goto err2;
  2094. }
  2095. if (is_vf)
  2096. edev->flags |= QEDE_FLAG_IS_VF;
  2097. qede_init_ndev(edev);
  2098. rc = qede_roce_dev_add(edev);
  2099. if (rc)
  2100. goto err3;
  2101. rc = register_netdev(edev->ndev);
  2102. if (rc) {
  2103. DP_NOTICE(edev, "Cannot register net-device\n");
  2104. goto err4;
  2105. }
  2106. edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
  2107. edev->ops->register_ops(cdev, &qede_ll_ops, edev);
  2108. #ifdef CONFIG_DCB
  2109. if (!IS_VF(edev))
  2110. qede_set_dcbnl_ops(edev->ndev);
  2111. #endif
  2112. INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
  2113. mutex_init(&edev->qede_lock);
  2114. edev->rx_copybreak = QEDE_RX_HDR_SIZE;
  2115. DP_INFO(edev, "Ending successfully qede probe\n");
  2116. return 0;
  2117. err4:
  2118. qede_roce_dev_remove(edev);
  2119. err3:
  2120. free_netdev(edev->ndev);
  2121. err2:
  2122. qed_ops->common->slowpath_stop(cdev);
  2123. err1:
  2124. qed_ops->common->remove(cdev);
  2125. err0:
  2126. return rc;
  2127. }
  2128. static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  2129. {
  2130. bool is_vf = false;
  2131. u32 dp_module = 0;
  2132. u8 dp_level = 0;
  2133. switch ((enum qede_pci_private)id->driver_data) {
  2134. case QEDE_PRIVATE_VF:
  2135. if (debug & QED_LOG_VERBOSE_MASK)
  2136. dev_err(&pdev->dev, "Probing a VF\n");
  2137. is_vf = true;
  2138. break;
  2139. default:
  2140. if (debug & QED_LOG_VERBOSE_MASK)
  2141. dev_err(&pdev->dev, "Probing a PF\n");
  2142. }
  2143. qede_config_debug(debug, &dp_module, &dp_level);
  2144. return __qede_probe(pdev, dp_module, dp_level, is_vf,
  2145. QEDE_PROBE_NORMAL);
  2146. }
  2147. enum qede_remove_mode {
  2148. QEDE_REMOVE_NORMAL,
  2149. };
  2150. static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
  2151. {
  2152. struct net_device *ndev = pci_get_drvdata(pdev);
  2153. struct qede_dev *edev = netdev_priv(ndev);
  2154. struct qed_dev *cdev = edev->cdev;
  2155. DP_INFO(edev, "Starting qede_remove\n");
  2156. cancel_delayed_work_sync(&edev->sp_task);
  2157. unregister_netdev(ndev);
  2158. qede_roce_dev_remove(edev);
  2159. edev->ops->common->set_power_state(cdev, PCI_D0);
  2160. pci_set_drvdata(pdev, NULL);
  2161. free_netdev(ndev);
  2162. /* Use global ops since we've freed edev */
  2163. qed_ops->common->slowpath_stop(cdev);
  2164. qed_ops->common->remove(cdev);
  2165. dev_info(&pdev->dev, "Ending qede_remove successfully\n");
  2166. }
  2167. static void qede_remove(struct pci_dev *pdev)
  2168. {
  2169. __qede_remove(pdev, QEDE_REMOVE_NORMAL);
  2170. }
  2171. /* -------------------------------------------------------------------------
  2172. * START OF LOAD / UNLOAD
  2173. * -------------------------------------------------------------------------
  2174. */
  2175. static int qede_set_num_queues(struct qede_dev *edev)
  2176. {
  2177. int rc;
  2178. u16 rss_num;
2179. /* Set up queues according to the available resources */
  2180. if (edev->req_queues)
  2181. rss_num = edev->req_queues;
  2182. else
  2183. rss_num = netif_get_num_default_rss_queues() *
  2184. edev->dev_info.common.num_hwfns;
  2185. rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
  2186. rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
  2187. if (rc > 0) {
  2188. /* Managed to request interrupts for our queues */
  2189. edev->num_queues = rc;
  2190. DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
  2191. QEDE_QUEUE_CNT(edev), rss_num);
  2192. rc = 0;
  2193. }
  2194. edev->fp_num_tx = edev->req_num_tx;
  2195. edev->fp_num_rx = edev->req_num_rx;
  2196. return rc;
  2197. }
  2198. static void qede_free_mem_sb(struct qede_dev *edev,
  2199. struct qed_sb_info *sb_info)
  2200. {
  2201. if (sb_info->sb_virt)
  2202. dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
  2203. (void *)sb_info->sb_virt, sb_info->sb_phys);
  2204. }
  2205. /* This function allocates fast-path status block memory */
  2206. static int qede_alloc_mem_sb(struct qede_dev *edev,
  2207. struct qed_sb_info *sb_info, u16 sb_id)
  2208. {
  2209. struct status_block *sb_virt;
  2210. dma_addr_t sb_phys;
  2211. int rc;
  2212. sb_virt = dma_alloc_coherent(&edev->pdev->dev,
  2213. sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
  2214. if (!sb_virt) {
  2215. DP_ERR(edev, "Status block allocation failed\n");
  2216. return -ENOMEM;
  2217. }
  2218. rc = edev->ops->common->sb_init(edev->cdev, sb_info,
  2219. sb_virt, sb_phys, sb_id,
  2220. QED_SB_TYPE_L2_QUEUE);
  2221. if (rc) {
  2222. DP_ERR(edev, "Status block initialization failed\n");
  2223. dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
  2224. sb_virt, sb_phys);
  2225. return rc;
  2226. }
  2227. return 0;
  2228. }
  2229. static void qede_free_rx_buffers(struct qede_dev *edev,
  2230. struct qede_rx_queue *rxq)
  2231. {
  2232. u16 i;
  2233. for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
  2234. struct sw_rx_data *rx_buf;
  2235. struct page *data;
  2236. rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
  2237. data = rx_buf->data;
  2238. dma_unmap_page(&edev->pdev->dev,
  2239. rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
  2240. rx_buf->data = NULL;
  2241. __free_page(data);
  2242. }
  2243. }
  2244. static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
  2245. {
  2246. int i;
  2247. if (edev->gro_disable)
  2248. return;
  2249. for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
  2250. struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
  2251. struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
  2252. if (replace_buf->data) {
  2253. dma_unmap_page(&edev->pdev->dev,
  2254. replace_buf->mapping,
  2255. PAGE_SIZE, DMA_FROM_DEVICE);
  2256. __free_page(replace_buf->data);
  2257. }
  2258. }
  2259. }
  2260. static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
  2261. {
  2262. qede_free_sge_mem(edev, rxq);
  2263. /* Free rx buffers */
  2264. qede_free_rx_buffers(edev, rxq);
  2265. /* Free the parallel SW ring */
  2266. kfree(rxq->sw_rx_ring);
  2267. /* Free the real RQ ring used by FW */
  2268. edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
  2269. edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
  2270. }
  2271. static int qede_alloc_rx_buffer(struct qede_dev *edev,
  2272. struct qede_rx_queue *rxq)
  2273. {
  2274. struct sw_rx_data *sw_rx_data;
  2275. struct eth_rx_bd *rx_bd;
  2276. dma_addr_t mapping;
  2277. struct page *data;
  2278. data = alloc_pages(GFP_ATOMIC, 0);
  2279. if (unlikely(!data)) {
  2280. DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
  2281. return -ENOMEM;
  2282. }
2283. /* Map the entire page, as it will be split into
2284. * multiple RX buffer segments.
2285. */
  2286. mapping = dma_map_page(&edev->pdev->dev, data, 0,
  2287. PAGE_SIZE, DMA_FROM_DEVICE);
  2288. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  2289. __free_page(data);
  2290. DP_NOTICE(edev, "Failed to map Rx buffer\n");
  2291. return -ENOMEM;
  2292. }
  2293. sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
  2294. sw_rx_data->page_offset = 0;
  2295. sw_rx_data->data = data;
  2296. sw_rx_data->mapping = mapping;
  2297. /* Advance PROD and get BD pointer */
  2298. rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
  2299. WARN_ON(!rx_bd);
  2300. rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
  2301. rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
  2302. rxq->sw_rx_prod++;
  2303. return 0;
  2304. }
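/* Allocate the per-aggregation TPA replacement buffers. Aggregation is
 * disabled (gro_disable) when the MTU exceeds PAGE_SIZE or when a
 * replacement buffer cannot be allocated or mapped.
 */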
  2305. static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
  2306. {
  2307. dma_addr_t mapping;
  2308. int i;
  2309. if (edev->gro_disable)
  2310. return 0;
  2311. if (edev->ndev->mtu > PAGE_SIZE) {
  2312. edev->gro_disable = 1;
  2313. return 0;
  2314. }
  2315. for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
  2316. struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
  2317. struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
  2318. replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
  2319. if (unlikely(!replace_buf->data)) {
  2320. DP_NOTICE(edev,
  2321. "Failed to allocate TPA skb pool [replacement buffer]\n");
  2322. goto err;
  2323. }
  2324. mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
  2325. PAGE_SIZE, DMA_FROM_DEVICE);
  2326. if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
  2327. DP_NOTICE(edev,
  2328. "Failed to map TPA replacement buffer\n");
  2329. goto err;
  2330. }
  2331. replace_buf->mapping = mapping;
  2332. tpa_info->replace_buf.page_offset = 0;
  2333. tpa_info->replace_buf_mapping = mapping;
  2334. tpa_info->agg_state = QEDE_AGG_STATE_NONE;
  2335. }
  2336. return 0;
  2337. err:
  2338. qede_free_sge_mem(edev, rxq);
  2339. edev->gro_disable = 1;
  2340. return -ENOMEM;
  2341. }
  2342. /* This function allocates all memory needed per Rx queue */
  2343. static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
  2344. {
  2345. int i, rc, size;
  2346. rxq->num_rx_buffers = edev->q_num_rx_buffers;
  2347. rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
  2348. if (rxq->rx_buf_size > PAGE_SIZE)
  2349. rxq->rx_buf_size = PAGE_SIZE;
2350. /* Segment size to split a page into multiple equal parts */
  2351. rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
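/* e.g. a 1500-byte MTU yields an rx_buf_size of roughly 1522 bytes, which
 * rounds up to a 2048-byte segment - two buffers per page (example values,
 * assuming PAGE_SIZE == 4096).
 */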
  2352. /* Allocate the parallel driver ring for Rx buffers */
  2353. size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
  2354. rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
  2355. if (!rxq->sw_rx_ring) {
  2356. DP_ERR(edev, "Rx buffers ring allocation failed\n");
  2357. rc = -ENOMEM;
  2358. goto err;
  2359. }
  2360. /* Allocate FW Rx ring */
  2361. rc = edev->ops->common->chain_alloc(edev->cdev,
  2362. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  2363. QED_CHAIN_MODE_NEXT_PTR,
  2364. QED_CHAIN_CNT_TYPE_U16,
  2365. RX_RING_SIZE,
  2366. sizeof(struct eth_rx_bd),
  2367. &rxq->rx_bd_ring);
  2368. if (rc)
  2369. goto err;
  2370. /* Allocate FW completion ring */
  2371. rc = edev->ops->common->chain_alloc(edev->cdev,
  2372. QED_CHAIN_USE_TO_CONSUME,
  2373. QED_CHAIN_MODE_PBL,
  2374. QED_CHAIN_CNT_TYPE_U16,
  2375. RX_RING_SIZE,
  2376. sizeof(union eth_rx_cqe),
  2377. &rxq->rx_comp_ring);
  2378. if (rc)
  2379. goto err;
  2380. /* Allocate buffers for the Rx ring */
  2381. for (i = 0; i < rxq->num_rx_buffers; i++) {
  2382. rc = qede_alloc_rx_buffer(edev, rxq);
  2383. if (rc) {
  2384. DP_ERR(edev,
  2385. "Rx buffers allocation failed at index %d\n", i);
  2386. goto err;
  2387. }
  2388. }
  2389. rc = qede_alloc_sge_mem(edev, rxq);
  2390. err:
  2391. return rc;
  2392. }
  2393. static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
  2394. {
  2395. /* Free the parallel SW ring */
  2396. kfree(txq->sw_tx_ring);
2397. /* Free the real Tx PBL ring used by FW */
  2398. edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
  2399. }
  2400. /* This function allocates all memory needed per Tx queue */
  2401. static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
  2402. {
  2403. int size, rc;
  2404. union eth_tx_bd_types *p_virt;
  2405. txq->num_tx_buffers = edev->q_num_tx_buffers;
  2406. /* Allocate the parallel driver ring for Tx buffers */
  2407. size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
  2408. txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
  2409. if (!txq->sw_tx_ring) {
  2410. DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
  2411. goto err;
  2412. }
  2413. rc = edev->ops->common->chain_alloc(edev->cdev,
  2414. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  2415. QED_CHAIN_MODE_PBL,
  2416. QED_CHAIN_CNT_TYPE_U16,
  2417. TX_RING_SIZE,
  2418. sizeof(*p_virt), &txq->tx_pbl);
  2419. if (rc)
  2420. goto err;
  2421. return 0;
  2422. err:
  2423. qede_free_mem_txq(edev, txq);
  2424. return -ENOMEM;
  2425. }
  2426. /* This function frees all memory of a single fp */
  2427. static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
  2428. {
  2429. int tc;
  2430. qede_free_mem_sb(edev, fp->sb_info);
  2431. if (fp->type & QEDE_FASTPATH_RX)
  2432. qede_free_mem_rxq(edev, fp->rxq);
  2433. if (fp->type & QEDE_FASTPATH_TX)
  2434. for (tc = 0; tc < edev->num_tc; tc++)
  2435. qede_free_mem_txq(edev, &fp->txqs[tc]);
  2436. }
2437. /* This function allocates all memory needed for a single fp (i.e. an entity
2438. * which contains a status block, one rx queue and/or multiple per-TC tx queues).
2439. */
  2440. static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
  2441. {
  2442. int rc, tc;
  2443. rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
  2444. if (rc)
  2445. goto err;
  2446. if (fp->type & QEDE_FASTPATH_RX) {
  2447. rc = qede_alloc_mem_rxq(edev, fp->rxq);
  2448. if (rc)
  2449. goto err;
  2450. }
  2451. if (fp->type & QEDE_FASTPATH_TX) {
  2452. for (tc = 0; tc < edev->num_tc; tc++) {
  2453. rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
  2454. if (rc)
  2455. goto err;
  2456. }
  2457. }
  2458. return 0;
  2459. err:
  2460. return rc;
  2461. }
  2462. static void qede_free_mem_load(struct qede_dev *edev)
  2463. {
  2464. int i;
  2465. for_each_queue(i) {
  2466. struct qede_fastpath *fp = &edev->fp_array[i];
  2467. qede_free_mem_fp(edev, fp);
  2468. }
  2469. }
  2470. /* This function allocates all qede memory at NIC load. */
  2471. static int qede_alloc_mem_load(struct qede_dev *edev)
  2472. {
  2473. int rc = 0, queue_id;
  2474. for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
  2475. struct qede_fastpath *fp = &edev->fp_array[queue_id];
  2476. rc = qede_alloc_mem_fp(edev, fp);
  2477. if (rc) {
  2478. DP_ERR(edev,
  2479. "Failed to allocate memory for fastpath - rss id = %d\n",
  2480. queue_id);
  2481. qede_free_mem_load(edev);
  2482. return rc;
  2483. }
  2484. }
  2485. return 0;
  2486. }
  2487. /* This function inits fp content and resets the SB, RXQ and TXQ structures */
  2488. static void qede_init_fp(struct qede_dev *edev)
  2489. {
  2490. int queue_id, rxq_index = 0, txq_index = 0, tc;
  2491. struct qede_fastpath *fp;
  2492. for_each_queue(queue_id) {
  2493. fp = &edev->fp_array[queue_id];
  2494. fp->edev = edev;
  2495. fp->id = queue_id;
  2496. memset((void *)&fp->napi, 0, sizeof(fp->napi));
  2497. memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
  2498. if (fp->type & QEDE_FASTPATH_RX) {
  2499. memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
  2500. fp->rxq->rxq_id = rxq_index++;
  2501. }
  2502. if (fp->type & QEDE_FASTPATH_TX) {
  2503. memset((void *)fp->txqs, 0,
  2504. (edev->num_tc * sizeof(*fp->txqs)));
  2505. for (tc = 0; tc < edev->num_tc; tc++) {
  2506. fp->txqs[tc].index = txq_index +
  2507. tc * QEDE_TSS_COUNT(edev);
  2508. if (edev->dev_info.is_legacy)
  2509. fp->txqs[tc].is_legacy = true;
  2510. }
  2511. txq_index++;
  2512. }
  2513. snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
  2514. edev->ndev->name, queue_id);
  2515. }
  2516. edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
  2517. }
  2518. static int qede_set_real_num_queues(struct qede_dev *edev)
  2519. {
  2520. int rc = 0;
  2521. rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
  2522. if (rc) {
  2523. DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
  2524. return rc;
  2525. }
  2526. rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
  2527. if (rc) {
  2528. DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
  2529. return rc;
  2530. }
  2531. return 0;
  2532. }
  2533. static void qede_napi_disable_remove(struct qede_dev *edev)
  2534. {
  2535. int i;
  2536. for_each_queue(i) {
  2537. napi_disable(&edev->fp_array[i].napi);
  2538. netif_napi_del(&edev->fp_array[i].napi);
  2539. }
  2540. }
  2541. static void qede_napi_add_enable(struct qede_dev *edev)
  2542. {
  2543. int i;
  2544. /* Add NAPI objects */
  2545. for_each_queue(i) {
  2546. netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
  2547. qede_poll, NAPI_POLL_WEIGHT);
  2548. napi_enable(&edev->fp_array[i].napi);
  2549. }
  2550. }
static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			synchronize_irq(edev->int_info.msix[i].vector);
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
}
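
/* Request one MSI-X vector per prepared fastpath queue. */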
static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}
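
/* Non-MSI-X fastpath callback: simply schedule NAPI on the given queue. */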
static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}
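
/* Learn the interrupt configuration from qed and attach the fastpath
 * handlers, either as MSI-X vectors or as simd callbacks.
 */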
static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn to receive the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}

	return 0;
}
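
/* Poll a Tx queue until its producer and consumer indices meet; if it is
 * stuck, optionally request an MCP drain and retry once.
 */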
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}
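
/* Deactivate the vport, drain and stop all Tx/Rx queues in reverse order,
 * then stop the vport itself.
 */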
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	int rc, tc, i;

	/* Disable the vport */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 0;
	vport_update_params.update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < edev->num_tc; tc++) {
				struct qede_tx_queue *txq = &fp->txqs[tc];

				rc = qede_drain_txq(edev, txq, true);
				if (rc)
					return rc;
			}
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		struct qed_stop_rxq_params rx_params;

		/* Stop the Tx Queue(s) */
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < edev->num_tc; tc++) {
				struct qed_stop_txq_params tx_params;
				u8 val;

				tx_params.rss_id = i;
				val = edev->fp_array[i].txqs[tc].index;
				tx_params.tx_queue_id = val;

				rc = edev->ops->q_tx_stop(cdev, &tx_params);
				if (rc) {
					DP_ERR(edev, "Failed to stop TXQ #%d\n",
					       tx_params.tx_queue_id);
					return rc;
				}
			}
		}

		/* Stop the Rx Queue */
		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
			memset(&rx_params, 0, sizeof(rx_params));
			rx_params.rss_id = i;
			rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;

			rc = edev->ops->q_rx_stop(cdev, &rx_params);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}
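
/* Start the vport and every Rx/Tx queue, program the doorbell and RSS
 * configuration, then send the vport-update that activates traffic.
 */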
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int rc, tc, i;
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_update_vport_params vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_start_vport_params start = {0};
	bool reset_rss_indir = false;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot set V-PORT active - there are no Rx queues\n");
		return -EINVAL;
	}

	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);
	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&q_params, 0, sizeof(q_params));
			q_params.rss_id = i;
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt,
						   &rxq->hw_rxq_prod_addr);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				return rc;
			}

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;

		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qede_tx_queue *txq = &fp->txqs[tc];

			p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
			page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);

			memset(&q_params, 0, sizeof(q_params));
			q_params.rss_id = i;
			q_params.queue_id = txq->index;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = TX_PI(tc);

			rc = edev->ops->q_tx_start(cdev, &q_params,
						   p_phys_table, page_cnt,
						   &txq->doorbell_addr);
			if (rc) {
				DP_ERR(edev, "Start TXQ #%d failed %d\n",
				       txq->index, rc);
				return rc;
			}

			txq->hw_cons_ptr =
			    &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				  DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_AGG_VAL_SEL,
				  DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = start.vport_id;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 1;

	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params.update_tx_switching_flg = 1;
		vport_update_params.tx_switching_flg = 1;
	}

	/* Fill struct with RSS params */
	if (QEDE_RSS_COUNT(edev) > 1) {
		vport_update_params.update_rss_flg = 1;

		/* Need to validate current RSS config uses valid entries */
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			if (edev->rss_params.rss_ind_table[i] >=
			    QEDE_RSS_COUNT(edev)) {
				reset_rss_indir = true;
				break;
			}
		}

		if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
		    reset_rss_indir) {
			u16 val;

			for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
				u16 indir_val;

				val = QEDE_RSS_COUNT(edev);
				indir_val = ethtool_rxfh_indir_default(i, val);
				edev->rss_params.rss_ind_table[i] = indir_val;
			}
			edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
		}

		if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
			netdev_rss_key_fill(edev->rss_params.rss_key,
					    sizeof(edev->rss_params.rss_key));
			edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
		}

		if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
			edev->rss_params.rss_caps = QED_RSS_IPV4 |
						    QED_RSS_IPV6 |
						    QED_RSS_IPV4_TCP |
						    QED_RSS_IPV6_TCP;
			edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
		}

		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
	} else {
		memset(&vport_update_params.rss_params, 0,
		       sizeof(vport_update_params.rss_params));
	}

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}
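
/* Build and submit a multicast filter command for the given list of MACs. */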
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
};
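
/* Tear down the data path: stop Tx, reset the link, stop the queues,
 * release interrupts and free all load-time resources. Takes qede_lock
 * for the duration of the teardown.
 */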
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	qede_roce_dev_event_close(edev);
	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_CLOSED;

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Reset the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	rc = qede_stop_queues(edev);
	if (rc) {
		qede_sync_free_irqs(edev);
		goto out;
	}

	DP_INFO(edev, "Stopped Queues\n");

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	mutex_unlock(&edev->qede_lock);
	DP_INFO(edev, "Ending qede unload\n");
}

enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
};
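
/* Bring up the data path: allocate fastpath resources, enable NAPI and
 * interrupts, start the queues and request link-up. Error paths unwind
 * in reverse order of the setup.
 */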
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
{
	struct qed_link_params link_params;
	struct qed_link_output link_output;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	rc = qede_set_num_queues(edev);
	if (rc)
		goto err0;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto err0;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
		QEDE_QUEUE_CNT(edev), edev->num_tc);

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_OPEN;
	mutex_unlock(&edev->qede_lock);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Query whether link is already-up */
	memset(&link_output, 0, sizeof(link_output));
	edev->ops->common->get_link(edev->cdev, &link_output);
	qede_roce_dev_event_open(edev);
	qede_link_update(edev, &link_output);

	DP_INFO(edev, "Ending qede load successfully\n");

	return 0;

err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
err0:
	return rc;
}
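
/* Unload and reload the device, optionally letting the caller adjust
 * parameters in between, then restore the Rx mode configuration.
 */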
void qede_reload(struct qede_dev *edev,
		 void (*func)(struct qede_dev *, union qede_reload_args *),
		 union qede_reload_args *args)
{
	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	/* Call function handler to update parameters
	 * needed for function load.
	 */
	if (func)
		func(edev, args);

	qede_load(edev, QEDE_LOAD_RELOAD);

	mutex_lock(&edev->qede_lock);
	qede_config_rx_mode(edev->ndev);
	mutex_unlock(&edev->qede_lock);
}

/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL);
	if (rc)
		return rc;

	udp_tunnel_get_rx_info(ndev);

	return 0;
}
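
/* called with rtnl_lock */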
static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	return 0;
}
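
/* Link-change notification from qed; start or stop the Tx path to match
 * the reported carrier state.
 */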
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!netif_running(edev->ndev)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
		}
	}
}
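
/* Validate the new MAC address and, if the device is running, replace the
 * primary unicast filter accordingly.
 */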
static int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc;

	ASSERT_RTNL(); /* @@@TBD To be removed */

	DP_INFO(edev, "Set_mac_addr called\n");

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		return -EFAULT;
	}

	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
		DP_NOTICE(edev, "qed prevents setting MAC\n");
		return -EINVAL;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);

	if (!netif_running(ndev)) {
		DP_NOTICE(edev, "The device is currently down\n");
		return 0;
	}

	/* Remove the previous primary mac */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   edev->primary_mac);
	if (rc)
		return rc;

	/* Add MAC filter according to the new unicast HW MAC address */
	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				     edev->primary_mac);
}
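
/* Rebuild the multicast filter list; fall back to multicast-promiscuous
 * when IFF_ALLMULTI is set or there are too many addresses.
 */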
static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count < 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}
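
/* Defer the Rx mode update to the slowpath task; the actual work is done
 * in qede_config_rx_mode().
 */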
static void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_INFO(edev, "qede_set_rx_mode called\n");

	if (edev->state != QEDE_STATE_OPEN) {
		DP_INFO(edev,
			"qede_set_rx_mode called while interface is down\n");
	} else {
		set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task, 0);
	}
}

/* Must be called with qede_lock held */
static void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->primary_mac);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if ((ndev->flags & IFF_PROMISC) ||
	    (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	} else {
		/* Add MAC filters according to the unicast secondary macs */
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}

		rc = qede_configure_mcast_filtering(ndev, &accept_flags);
		if (rc)
			goto out;
	}

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}