fm10k_pci.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/aer.h>

#include "fm10k.h"

static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
	[fm10k_device_vf] = &fm10k_vf_info,
};

/*
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);

u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
{
	struct fm10k_intfc *interface = hw->back;
	u16 value = 0;

	if (FM10K_REMOVED(hw->hw_addr))
		return ~value;

	pci_read_config_word(interface->pdev, reg, &value);
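
	/* An all-ones value from config space suggests the device may have
	 * been surprise removed. The register flush below forces an MMIO
	 * read, likely so that the read path in fm10k_read_reg() can detect
	 * the removal and detach the netdev.
	 */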
	if (value == 0xFFFF)
		fm10k_write_flush(hw);

	return value;
}

u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
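
	/* MMIO reads from a surprise-removed PCIe device return all ones.
	 * If this register reads as all ones, confirm the removal by also
	 * checking register 0 (unless register 0 is what we just read)
	 * before declaring the device detached.
	 */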
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}

static int fm10k_hw_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	fm10k_write_flush(hw);

	return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
}

/**
 * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task
 * @interface: fm10k private interface structure
 *
 * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be
 * started immediately, request that it be restarted when possible.
 */
void fm10k_macvlan_schedule(struct fm10k_intfc *interface)
{
	/* Avoid processing the MAC/VLAN queue when the service task is
	 * disabled, or when we're resetting the device.
	 */
	if (!test_bit(__FM10K_MACVLAN_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_MACVLAN_SCHED, interface->state)) {
		clear_bit(__FM10K_MACVLAN_REQUEST, interface->state);

		/* We delay the actual start of execution in order to allow
		 * multiple MAC/VLAN updates to accumulate before handling
		 * them, and to allow some time to let the mailbox drain
		 * between runs.
		 */
		queue_delayed_work(fm10k_workqueue,
				   &interface->macvlan_task, 10);
	} else {
		set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
	}
}

/**
 * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Wait until the MAC/VLAN queue task has stopped, and cancel any future
 * requests.
 */
static void fm10k_stop_macvlan_task(struct fm10k_intfc *interface)
{
	/* Disable the MAC/VLAN work item */
	set_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* Make sure we waited until any current invocations have stopped */
	cancel_delayed_work_sync(&interface->macvlan_task);

	/* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task.
	 * However, it may not be unset if the MAC/VLAN task never actually
	 * got a chance to run. Since we've canceled the task here, and it
	 * cannot be rescheduled right now, we need to ensure the scheduled
	 * bit gets unset.
	 */
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);
}

/**
 * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule
 * the MAC/VLAN work monitor.
 */
static void fm10k_resume_macvlan_task(struct fm10k_intfc *interface)
{
	/* Re-enable the MAC/VLAN work item */
	clear_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* We might have received a MAC/VLAN request while disabled. If so,
	 * kick off the queue now.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}
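
/**
 * fm10k_service_event_schedule - Schedule the service task to run
 * @interface: fm10k private interface structure
 *
 * If the service task is neither disabled nor already scheduled, queue it
 * on the fm10k workqueue; otherwise record the request so that
 * fm10k_service_event_complete() can re-schedule it when the current run
 * finishes.
 */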
void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
	if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) {
		clear_bit(__FM10K_SERVICE_REQUEST, interface->state);
		queue_work(fm10k_workqueue, &interface->service_task);
	} else {
		set_bit(__FM10K_SERVICE_REQUEST, interface->state);
	}
}

static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, interface->state);

	/* If a service event was requested since we started, immediately
	 * re-schedule now. This ensures we don't drop a request until the
	 * next timer event.
	 */
	if (test_bit(__FM10K_SERVICE_REQUEST, interface->state))
		fm10k_service_event_schedule(interface);
}
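
/**
 * fm10k_stop_service_event - Disable the service task and wait for it to stop
 * @interface: fm10k private interface structure
 */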
static void fm10k_stop_service_event(struct fm10k_intfc *interface)
{
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);
	cancel_work_sync(&interface->service_task);

	/* It's possible that cancel_work_sync stopped the service task from
	 * running before it could actually start. In this case the
	 * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that
	 * the service task cannot be running at this point, we need to clear
	 * the scheduled bit, as otherwise the service task may never be
	 * restarted.
	 */
	clear_bit(__FM10K_SERVICE_SCHED, interface->state);
}
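
/**
 * fm10k_start_service_event - Re-enable the service task and schedule a run
 * @interface: fm10k private interface structure
 */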
static void fm10k_start_service_event(struct fm10k_intfc *interface)
{
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);
}

/**
 * fm10k_service_timer - Timer Call-back
 * @t: pointer to timer data
 **/
static void fm10k_service_timer(struct timer_list *t)
{
	struct fm10k_intfc *interface = from_timer(interface, t,
						   service_timer);

	/* Reset the timer */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	fm10k_service_event_schedule(interface);
}

/**
 * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset
 * @interface: fm10k private data structure
 *
 * This function prepares for a device reset by shutting as much down as we
 * can. It does nothing and returns false if __FM10K_RESETTING was already set
 * prior to calling this function. It returns true if it actually did work.
 */
static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(netdev);

	/* Nothing to do if a reset is already in progress */
	if (test_and_set_bit(__FM10K_RESETTING, interface->state))
		return false;

	/* As the MAC/VLAN task will be accessing registers, it must not be
	 * running while we reset. Although the task will not be scheduled
	 * once we start resetting, it may already be running.
	 */
	fm10k_stop_macvlan_task(interface);

	rtnl_lock();

	fm10k_iov_suspend(interface->pdev);

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	rtnl_unlock();

	return true;
}

static int fm10k_handle_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	WARN_ON(!test_bit(__FM10K_RESETTING, interface->state));

	rtnl_lock();

	pci_set_master(interface->pdev);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = fm10k_init_queueing_scheme(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"init_queueing_scheme failed: %d\n", err);
		goto reinit_err;
	}

	/* re-associate interrupts */
	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_irq;

	err = fm10k_hw_ready(interface);
	if (err)
		goto err_open;

	/* update hardware address for VFs if perm_addr has changed */
	if (hw->mac.type == fm10k_mac_vf) {
		if (is_valid_ether_addr(hw->mac.perm_addr)) {
			ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
			netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
		}

		if (hw->mac.vlan_override)
			netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		else
			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	err = netif_running(netdev) ? fm10k_open(netdev) : 0;
	if (err)
		goto err_open;

	fm10k_iov_resume(interface->pdev);

	rtnl_unlock();

	fm10k_resume_macvlan_task(interface);

	clear_bit(__FM10K_RESETTING, interface->state);

	return err;
err_open:
	fm10k_mbx_free_irq(interface);
err_mbx_irq:
	fm10k_clear_queueing_scheme(interface);
reinit_err:
	netif_device_detach(netdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, interface->state);

	return err;
}
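
/**
 * fm10k_detach_subtask - check whether lost PCIe register space returned
 * @interface: fm10k private interface structure
 *
 * If the netdev has been detached because register access was lost, poll
 * the BAR to see whether the device has come back. Once it has, restore
 * the hardware address, reset the device, and re-attach the netdev.
 */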
static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	u32 __iomem *hw_addr;
	u32 value;
	int err;

	/* do nothing if netdev is still present or hw_addr is set */
	if (netif_device_present(netdev) || interface->hw.hw_addr)
		return;

	/* We've lost the PCIe register space, and can no longer access the
	 * device. Shut everything except the detach subtask down and prepare
	 * to reset the device in case we recover. If we actually prepare for
	 * reset, indicate that we're detached.
	 */
	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_DETACHED, interface->state);

	/* check the real address space to see if we've recovered */
	hw_addr = READ_ONCE(interface->uc_addr);
	value = readl(hw_addr);
	if (~value) {
		/* Make sure the reset was initiated because we detached,
		 * otherwise we might race with a different reset flow.
		 */
		if (!test_and_clear_bit(__FM10K_RESET_DETACHED,
					interface->state))
			return;

		/* Restore the hardware address */
		interface->hw.hw_addr = interface->uc_addr;

		/* PCIe link has been restored, and the device is active
		 * again. Restore everything and reset the device.
		 */
		err = fm10k_handle_reset(interface);
		if (err) {
			netdev_err(netdev, "Unable to reset device: %d\n", err);
			interface->hw.hw_addr = NULL;
			return;
		}

		/* Re-attach the netdev */
		netif_device_attach(netdev);
		netdev_warn(netdev, "PCIe link restored, device now attached\n");
		return;
	}
}

static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	int err;

	if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
				interface->flags))
		return;

	/* If another thread has already prepared to reset the device, we
	 * should not attempt to handle a reset here, since we'd race with
	 * that thread. This may happen if we suspend the device or if the
	 * PCIe link is lost. In this case, we'll just ignore the RESET
	 * request, as it will (eventually) be taken care of when the thread
	 * which actually started the reset is finished.
	 */
	if (!fm10k_prepare_for_reset(interface))
		return;

	netdev_err(interface->netdev, "Reset interface\n");

	err = fm10k_handle_reset(interface);
	if (err)
		dev_err(&interface->pdev->dev,
			"fm10k_handle_reset failed: %d\n", err);
}

/**
 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
 * @interface: board private structure
 *
 * Configure the SWPRI to PC mapping for the port.
 **/
static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int i;

	/* clear flag indicating update is needed */
	clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);

	/* these registers are only available on the PF */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* configure SWPRI to PC map */
	for (i = 0; i < FM10K_SWPRI_MAX; i++)
		fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
				netdev_get_prio_tc_map(netdev, i));
}

/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	if (test_bit(__FM10K_LINK_DOWN, interface->state)) {
		interface->host_ready = false;
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, interface->state);
	}

	if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) {
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* free the lock */
	fm10k_mbx_unlock(interface);
}

/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* If we're resetting, bail out */
	if (test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);

	/* process downstream mailboxes */
	fm10k_iov_mbx(interface);
}

/**
 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently down */
	if (netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is up\n");

	netif_carrier_on(netdev);
	netif_tx_wake_all_queues(netdev);
}

/**
 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently up */
	if (!netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is down\n");

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
}

/**
 * fm10k_update_stats - Update the board statistics counters.
 * @interface: board private structure
 **/
void fm10k_update_stats(struct fm10k_intfc *interface)
{
	struct net_device_stats *net_stats = &interface->netdev->stats;
	struct fm10k_hw *hw = &interface->hw;
	u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
	u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
	u64 rx_link_errors = 0;
	u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
	u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
	u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
	u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
	u64 bytes, pkts;
	int i;

	/* ensure only one thread updates stats at a time */
	if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		return;

	/* do not allow stats update via service task for next second */
	interface->next_stats_update = jiffies + HZ;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);

		if (!tx_ring)
			continue;

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		tx_csum_errors += tx_ring->tx_stats.csum_err;
		bytes += tx_ring->stats.bytes;
		pkts += tx_ring->stats.packets;
		hw_csum_tx_good += tx_ring->tx_stats.csum_good;
	}

	interface->restart_queue = restart_queue;
	interface->tx_busy = tx_busy;
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = pkts;
	interface->tx_csum_errors = tx_csum_errors;
	interface->hw_csum_tx_good = hw_csum_tx_good;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);

		if (!rx_ring)
			continue;

		bytes += rx_ring->stats.bytes;
		pkts += rx_ring->stats.packets;
		alloc_failed += rx_ring->rx_stats.alloc_failed;
		rx_csum_errors += rx_ring->rx_stats.csum_err;
		rx_errors += rx_ring->rx_stats.errors;
		hw_csum_rx_good += rx_ring->rx_stats.csum_good;
		rx_switch_errors += rx_ring->rx_stats.switch_errors;
		rx_drops += rx_ring->rx_stats.drops;
		rx_pp_errors += rx_ring->rx_stats.pp_errors;
		rx_link_errors += rx_ring->rx_stats.link_errors;
		rx_length_errors += rx_ring->rx_stats.length_errors;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = pkts;
	interface->alloc_failed = alloc_failed;
	interface->rx_csum_errors = rx_csum_errors;
	interface->hw_csum_rx_good = hw_csum_rx_good;
	interface->rx_switch_errors = rx_switch_errors;
	interface->rx_drops = rx_drops;
	interface->rx_pp_errors = rx_pp_errors;
	interface->rx_link_errors = rx_link_errors;
	interface->rx_length_errors = rx_length_errors;

	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	for (i = 0; i < hw->mac.max_queues; i++) {
		struct fm10k_hw_stats_q *q = &interface->stats.q[i];

		tx_bytes_nic += q->tx_bytes.count;
		tx_pkts_nic += q->tx_packets.count;
		rx_bytes_nic += q->rx_bytes.count;
		rx_pkts_nic += q->rx_packets.count;
		rx_drops_nic += q->rx_drops.count;
	}

	interface->tx_bytes_nic = tx_bytes_nic;
	interface->tx_packets_nic = tx_pkts_nic;
	interface->rx_bytes_nic = rx_bytes_nic;
	interface->rx_packets_nic = rx_pkts_nic;
	interface->rx_drops_nic = rx_drops_nic;

	/* Fill out the OS statistics structure */
	net_stats->rx_errors = rx_errors;
	net_stats->rx_dropped = interface->stats.nodesc_drop.count;

	clear_bit(__FM10K_UPDATING_STATS, interface->state);
}

/**
 * fm10k_watchdog_flush_tx - flush queues on host not ready
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
{
	int some_tx_pending = 0;
	int i;

	/* nothing to do if carrier is up */
	if (netif_carrier_ok(interface->netdev))
		return;

	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean) {
			some_tx_pending = 1;
			break;
		}
	}

	/* We've lost link, so the controller stops DMA, but we've got
	 * queued Tx work that's never going to get done, so reset
	 * controller to flush Tx.
	 */
	if (some_tx_pending)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
}

/**
 * fm10k_watchdog_subtask - check and bring link up
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	if (interface->host_ready)
		fm10k_watchdog_host_is_ready(interface);
	else
		fm10k_watchdog_host_not_ready(interface);

	/* update stats only once every second */
	if (time_is_before_jiffies(interface->next_stats_update))
		fm10k_update_stats(interface);

	/* flush any uncompleted work */
	fm10k_watchdog_flush_tx(interface);
}

/**
 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
 * @interface: pointer to the device interface structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* rate limit tx hang checks to only once every 2 seconds */
	if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
		return;
	interface->next_tx_hang_check = jiffies + (2 * HZ);

	if (netif_carrier_ok(interface->netdev)) {
		/* Force detection of hung controller */
		for (i = 0; i < interface->num_tx_queues; i++)
			set_check_for_tx_hang(interface->tx_ring[i]);

		/* Rearm all in-use q_vectors for immediate firing */
		for (i = 0; i < interface->num_q_vectors; i++) {
			struct fm10k_q_vector *qv = interface->q_vector[i];

			if (!qv->tx.count && !qv->rx.count)
				continue;
			writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
		}
	}
}

/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* Check whether we're detached first */
	fm10k_detach_subtask(interface);

	/* tasks run even when interface is down */
	fm10k_mbx_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}

/**
 * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager
 * @work: pointer to work_struct containing our data
 *
 * This work item handles sending MAC/VLAN updates to the switch manager. When
 * the interface is up, it will attempt to queue mailbox messages to the
 * switch manager requesting updates for MAC/VLAN pairs. If the Tx fifo of the
 * mailbox is full, it will reschedule itself to try again in a short while.
 * This ensures that the driver does not overload the switch mailbox with too
 * many simultaneous requests, causing an unnecessary reset.
 **/
static void fm10k_macvlan_task(struct work_struct *work)
{
	struct fm10k_macvlan_request *item;
	struct fm10k_intfc *interface;
	struct delayed_work *dwork;
	struct list_head *requests;
	struct fm10k_hw *hw;
	unsigned long flags;

	dwork = to_delayed_work(work);
	interface = container_of(dwork, struct fm10k_intfc, macvlan_task);
	hw = &interface->hw;
	requests = &interface->macvlan_requests;

	do {
		/* Pop the first item off the list */
		spin_lock_irqsave(&interface->macvlan_lock, flags);
		item = list_first_entry_or_null(requests,
						struct fm10k_macvlan_request,
						list);
		if (item)
			list_del_init(&item->list);

		spin_unlock_irqrestore(&interface->macvlan_lock, flags);

		/* We have no more items to process */
		if (!item)
			goto done;

		fm10k_mbx_lock(interface);

		/* Check that we have plenty of space to send the message. We
		 * want to ensure that the mailbox stays low enough to avoid a
		 * change in the host state, otherwise we may see spurious
		 * link up / link down notifications.
		 */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) {
			hw->mbx.ops.process(hw, &hw->mbx);
			set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
			fm10k_mbx_unlock(interface);

			/* Put the request back on the list */
			spin_lock_irqsave(&interface->macvlan_lock, flags);
			list_add(&item->list, requests);
			spin_unlock_irqrestore(&interface->macvlan_lock, flags);
			break;
		}

		switch (item->type) {
		case FM10K_MC_MAC_REQUEST:
			hw->mac.ops.update_mc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set);
			break;
		case FM10K_UC_MAC_REQUEST:
			hw->mac.ops.update_uc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set,
						   0);
			break;
		case FM10K_VLAN_REQUEST:
			hw->mac.ops.update_vlan(hw,
						item->vlan.vid,
						item->vlan.vsi,
						item->set);
			break;
		default:
			break;
		}

		fm10k_mbx_unlock(interface);

		/* Free the item now that we've sent the update */
		kfree(item);
	} while (true);

done:
	WARN_ON(!test_bit(__FM10K_MACVLAN_SCHED, interface->state));

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);

	/* If a MAC/VLAN request was scheduled since we started, we should
	 * re-schedule. However, there is no reason to re-schedule if there is
	 * no work to do.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}

/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* Initialize XPS */
	if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) &&
	    ring->q_vector)
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}

/**
 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Verify the Tx descriptor ring is ready for transmit.
 **/
static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
				 struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	int wait_loop = 10;
	u32 txdctl;
	u8 reg_idx = ring->reg_idx;

	/* if we are already enabled just exit */
	if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
		return;

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
	} while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
	if (!wait_loop)
		netif_err(interface, drv, interface->netdev,
			  "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * fm10k_configure_tx - Configure Transmit Unit after Reset
 * @interface: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void fm10k_configure_tx(struct fm10k_intfc *interface)
{
	int i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_configure_tx_ring(interface, interface->tx_ring[i]);

	/* poll here to verify that Tx rings are now enabled */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
}

/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl &= ~FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty */
#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	if (!(rx_pause & BIT(ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* if we have an active VLAN, disable default VLAN ID */
	if (test_bit(hw->mac.default_vid, interface->active_vlans))
		ring->vid |= FM10K_VLAN_CLEAR;

	/* Map interrupt */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl |= FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);

	/* place buffers on ring for receive data */
	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
}

/**
 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
 * @interface: board private structure
 *
 * Configure the drop enable bits for the Rx rings.
 **/
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u8 rx_pause = interface->rx_pause;
	int i;

#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif

	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];
		u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
		u8 reg_idx = ring->reg_idx;

		if (!(rx_pause & BIT(ring->qos_pc)))
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
	}
}

/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables.
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);

	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	if (interface->l2_accel)
		dglort.shared_l = fls(interface->l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}

/**
 * fm10k_configure_rx - Configure Receive Unit after Reset
 * @interface: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void fm10k_configure_rx(struct fm10k_intfc *interface)
{
	int i;

	/* Configure SWPRI to PC map */
	fm10k_configure_swpri_map(interface);

	/* Configure RSS and DGLORT map */
	fm10k_configure_dglort(interface);

	/* Setup the HW Rx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_configure_rx_ring(interface, interface->rx_ring[i]);

	/* possible poll here to verify that Rx rings are now enabled */
}

static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data)
{
	struct fm10k_q_vector *q_vector = data;

	if (q_vector->rx.count || q_vector->tx.count)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	/* service upstream mailbox */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	hw->mac.get_host_state = true;
	fm10k_service_event_schedule(interface);

	return IRQ_HANDLED;
}
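
/* Expands to a switch case which assigns the stringified name of the fault
 * type to the 'error' message string.
 */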
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
			       struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data = interface->iov_data;
	char *error;

	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));

	/* For VF faults, clear out the respective LPORT, reset the queue
	 * resources, and then reconnect to the mailbox. This allows the
	 * VF in question to resume behavior. For transient faults that are
	 * the result of non-malicious behavior this will log the fault and
	 * allow the VF to resume functionality. Obviously for malicious VFs
	 * they will be able to attempt malicious behavior again. In this
	 * case, the system administrator will need to step in and manually
	 * remove or disable the VF in question.
	 */
	if (fault->func && iov_data) {
		int vf = fault->func - 1;
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];

		hw->iov.ops.reset_lport(hw, vf_info);
		hw->iov.ops.reset_resources(hw, vf_info);

		/* reset_lport disables the VF, so re-enable it */
		hw->iov.ops.set_lport(hw, vf_info, vf,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* reset_resources will disconnect from the mbx */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}
}

static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;
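
	/* The fault causes occupy consecutive bits in EICR. Walk the masked
	 * value one bit at a time, stepping the fault register type by
	 * FM10K_FAULT_SIZE for each bit position.
	 */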
	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_handle_fault(interface, type, &fault);
	}
}
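
/**
 * fm10k_reset_drop_on_empty - restore Rx queues held due to receiver overrun
 * @interface: fm10k private interface structure
 * @eicr: interrupt cause bits read from FM10K_EICR
 *
 * When the max hold time interrupt fires, scan the MAXHOLDQ registers for
 * queues the hardware stopped because of receiver overrun, count the events,
 * and restore the default RXDCTL value for any affected PF queues.
 */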
static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
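
	/* Each MAXHOLDQ register covers 32 queues, with queue 255 in bit 31
	 * of MAXHOLDQ(7). Walk downward from queue 255: maxholdq is shifted
	 * left each iteration so the current queue's status is always in
	 * bit 31, and once the register's remaining bits are all clear we
	 * can skip to the bottom of the current 32-queue block. On each
	 * 32-queue boundary the next MAXHOLDQ register is read, and its set
	 * bits are written back.
	 */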
  1109. for (q = 255;;) {
  1110. if (maxholdq & BIT(31)) {
  1111. if (q < FM10K_MAX_QUEUES_PF) {
  1112. interface->rx_overrun_pf++;
  1113. fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
  1114. } else {
  1115. interface->rx_overrun_vf++;
  1116. }
  1117. }
  1118. maxholdq *= 2;
  1119. if (!maxholdq)
  1120. q &= ~(32 - 1);
  1121. if (!q)
  1122. break;
  1123. if (q-- % 32)
  1124. continue;
  1125. maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
  1126. if (maxholdq)
  1127. fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
  1128. }
  1129. }
  1130. static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
  1131. {
  1132. struct fm10k_intfc *interface = data;
  1133. struct fm10k_hw *hw = &interface->hw;
  1134. struct fm10k_mbx_info *mbx = &hw->mbx;
  1135. u32 eicr;
  1136. s32 err = 0;
  1137. /* unmask any set bits related to this interrupt */
  1138. eicr = fm10k_read_reg(hw, FM10K_EICR);
  1139. fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
  1140. FM10K_EICR_SWITCHREADY |
  1141. FM10K_EICR_SWITCHNOTREADY));
  1142. /* report any faults found to the message log */
  1143. fm10k_report_fault(interface, eicr);
  1144. /* reset any queues disabled due to receiver overrun */
  1145. fm10k_reset_drop_on_empty(interface, eicr);
  1146. /* service mailboxes */
  1147. if (fm10k_mbx_trylock(interface)) {
  1148. err = mbx->ops.process(hw, mbx);
  1149. /* handle VFLRE events */
  1150. fm10k_iov_event(interface);
  1151. fm10k_mbx_unlock(interface);
  1152. }
  1153. if (err == FM10K_ERR_RESET_REQUESTED)
  1154. set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
  1155. /* if switch toggled state we should reset GLORTs */
  1156. if (eicr & FM10K_EICR_SWITCHNOTREADY) {
  1157. /* force link down for at least 4 seconds */
  1158. interface->link_down_event = jiffies + (4 * HZ);
  1159. set_bit(__FM10K_LINK_DOWN, interface->state);
  1160. /* reset dglort_map back to no config */
  1161. hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
  1162. }
  1163. /* we should validate host state after interrupt event */
  1164. hw->mac.get_host_state = true;
  1165. /* validate host state, and handle VF mailboxes in the service task */
  1166. fm10k_service_event_schedule(interface);
  1167. /* re-enable mailbox interrupt and indicate 20us delay */
  1168. fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
  1169. (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
  1170. FM10K_ITR_ENABLE);
  1171. return IRQ_HANDLED;
  1172. }

void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	int itr_reg;

	/* no mailbox IRQ to free if MSI-X is not enabled */
	if (!interface->msix_entries)
		return;

	entry = &interface->msix_entries[FM10K_MBX_VECTOR];

	/* disconnect the mailbox */
	hw->mbx.ops.disconnect(hw, &hw->mbx);

	/* disable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf) {
		fm10k_write_reg(hw, FM10K_EIMR,
				FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR) |
				FM10K_EIMR_DISABLE(MAXHOLDTIME));
		itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
	} else {
		itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR);
	}

	fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);

	free_irq(entry->vector, interface);
}

static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
			      struct fm10k_mbx_info *mbx)
{
	bool vlan_override = hw->mac.vlan_override;
	u16 default_vid = hw->mac.default_vid;
	struct fm10k_intfc *interface;
	s32 err;

	err = fm10k_msg_mac_vlan_vf(hw, results, mbx);
	if (err)
		return err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* MAC was changed so we need reset */
	if (is_valid_ether_addr(hw->mac.perm_addr) &&
	    !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* VLAN override was changed, or default VLAN changed */
	if ((vlan_override != hw->mac.vlan_override) ||
	    (default_vid != hw->mac.default_vid))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}

/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	struct pci_dev *pdev;

	interface = container_of(hw, struct fm10k_intfc, hw);
	pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u\n",
		**results & FM10K_TLV_ID_MASK);

	return 0;
}
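
/* TLV message handlers for the VF mailbox: incoming messages are matched
 * by message ID against this table, with fm10k_mbx_error catching any ID
 * that has no dedicated handler.
 */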
static const struct fm10k_msg_data vf_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};

static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* map all of the interrupt sources */
	fm10k_write_reg(hw, FM10K_VFINT_MAP, itr);

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}

static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	struct fm10k_intfc *interface;
	u32 dglort_map = hw->mac.dglort_map;
	s32 err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	err = fm10k_msg_err_pf(hw, results, mbx);
	if (!err && hw->swapi.status) {
		/* force link down for a reasonable delay */
		interface->link_down_event = jiffies + (2 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;

		fm10k_service_event_schedule(interface);

		/* prevent overloading kernel message buffer */
		if (interface->lport_map_failed)
			return 0;

		interface->lport_map_failed = true;

		if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
			dev_warn(&interface->pdev->dev,
				 "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
		dev_warn(&interface->pdev->dev,
			 "request logical port map failed: %d\n",
			 hw->swapi.status);

		return 0;
	}

	err = fm10k_msg_lport_map_pf(hw, results, mbx);
	if (err)
		return err;

	interface->lport_map_failed = false;

	/* we need to reset if port count was just updated */
	if (dglort_map != hw->mac.dglort_map)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}

static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	u16 glort, pvid;
	u32 pvid_update;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
				     &pvid_update);
	if (err)
		return err;
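
	/* both the glort and the pvid arrive packed into this single 32-bit
	 * attribute; FM10K_MSG_HDR_FIELD_GET pulls each field out by shift
	 * and mask
	 */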
	/* extract values from the pvid update */
	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* verify VLAN ID is valid */
	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* check to see if this belongs to one of the VFs */
	err = fm10k_iov_update_pvid(interface, glort, pvid);
	if (!err)
		return 0;

	/* we need to reset if default VLAN was just updated */
	if (pvid != hw->mac.default_vid)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	hw->mac.default_vid = pvid;

	return 0;
}
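
/* TLV message handlers for the PF mailbox, which carries messages from
 * the switch manager rather than from a VF.
 */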
static const struct fm10k_msg_data pf_mbx_data[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};

static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
	u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;
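
	/* TIMER0 runs the mailbox cause through ITR timer 0 for moderation,
	 * while IMMEDIATE delivers the remaining "other" causes unmoderated
	 * (see the INT_MAP writes below)
	 */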

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* Enable interrupts w/ no moderation for "other" interrupts */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr);

	/* Enable interrupts w/ moderation for mailbox */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr);

	/* Enable individual interrupt causes */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
			FM10K_EIMR_ENABLE(FUM_FAULT) |
			FM10K_EIMR_ENABLE(MAILBOX) |
			FM10K_EIMR_ENABLE(SWITCHREADY) |
			FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
			FM10K_EIMR_ENABLE(SRAMERROR) |
			FM10K_EIMR_ENABLE(VFLR) |
			FM10K_EIMR_ENABLE(MAXHOLDTIME));

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}

int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* enable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf)
		err = fm10k_mbx_request_irq_pf(interface);
	else
		err = fm10k_mbx_request_irq_vf(interface);
	if (err)
		return err;

	/* connect mailbox */
	err = hw->mbx.ops.connect(hw, &hw->mbx);

	/* if the mailbox failed to connect, then free IRQ */
	if (err)
		fm10k_mbx_free_irq(interface);

	return err;
}

/**
 * fm10k_qv_free_irq - release interrupts associated with queue vectors
 * @interface: board private structure
 *
 * Release all interrupts associated with this interface
 **/
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
	int vector = interface->num_q_vectors;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
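
	/* queue vectors sit after the non-queue (mailbox) vectors in the
	 * MSI-X table, so index past NON_Q_VECTORS(hw) and walk backwards
	 * from the last queue vector
	 */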
	entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];

	while (vector) {
		struct fm10k_q_vector *q_vector;

		vector--;
		entry--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}
}

/**
 * fm10k_qv_request_irq - initialize interrupts for queue vectors
 * @interface: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
int fm10k_qv_request_irq(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	unsigned int ri = 0, ti = 0;
	int vector, err;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw)];

	for (vector = 0; vector < interface->num_q_vectors; vector++) {
		struct fm10k_q_vector *q_vector = interface->q_vector[vector];

		/* name the vector */
		if (q_vector->tx.count && q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", dev->name, ri++);
			ti++;
		} else if (q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", dev->name, ri++);
		} else if (q_vector->tx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", dev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
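
		/* the name chosen above is what appears for this vector in
		 * /proc/interrupts, e.g. "eth0-TxRx-0"
		 */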

		/* Assign ITR register to q_vector */
		q_vector->itr = (hw->mac.type == fm10k_mac_pf) ?
				&interface->uc_addr[FM10K_ITR(entry->entry)] :
				&interface->uc_addr[FM10K_VFITR(entry->entry)];

		/* request the IRQ */
		err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			netif_err(interface, probe, dev,
				  "request_irq failed for MSIX interrupt Error: %d\n",
				  err);
			goto err_out;
		}

		/* assign the mask for this irq */
		irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);

		/* Enable q_vector */
		writel(FM10K_ITR_ENABLE, q_vector->itr);

		entry++;
	}

	return 0;

err_out:
	/* wind through the ring freeing all entries and vectors */
	while (vector) {
		struct fm10k_q_vector *q_vector;

		entry--;
		vector--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}

	return err;
}

void fm10k_up(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* Enable Tx/Rx DMA */
	hw->mac.ops.start_hw(hw);

	/* configure Tx descriptor rings */
	fm10k_configure_tx(interface);

	/* configure Rx descriptor rings */
	fm10k_configure_rx(interface);

	/* configure interrupts */
	hw->mac.ops.update_int_moderator(hw);

	/* enable statistics capture again */
	clear_bit(__FM10K_UPDATING_STATS, interface->state);

	/* clear down bit to indicate we are ready to go */
	clear_bit(__FM10K_DOWN, interface->state);

	/* enable polling cleanups */
	fm10k_napi_enable_all(interface);

	/* re-establish Rx filters */
	fm10k_restore_rx_state(interface);

	/* enable transmits */
	netif_tx_start_all_queues(interface->netdev);

	/* kick off the service timer now */
	hw->mac.get_host_state = true;
	mod_timer(&interface->service_timer, jiffies);
}

static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

void fm10k_down(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err, i = 0, count = 0;

	/* signal that we are down to the interrupt handler and service task */
	if (test_and_set_bit(__FM10K_DOWN, interface->state))
		return;

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);

	/* disable transmits */
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	/* reset Rx filters */
	fm10k_reset_rx_state(interface);

	/* disable polling routines */
	fm10k_napi_disable_all(interface);

	/* capture stats one last time before stopping interface */
	fm10k_update_stats(interface);

	/* prevent updating statistics while we're down */
	while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		usleep_range(1000, 2000);

	/* skip waiting for TX DMA if we lost PCIe link */
	if (FM10K_REMOVED(hw->hw_addr))
		goto skip_tx_dma_drain;

	/* In some rare circumstances it can take a while for Tx queues to
	 * quiesce and be fully disabled. Attempt to .stop_hw() first, and
	 * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
	 * until the Tx queues have emptied, or until a number of retries. If
	 * we fail to clear within the retry loop, we will issue a warning
	 * indicating that Tx DMA is probably hung. Note this means we call
	 * .stop_hw() twice but this shouldn't cause any problems.
	 */
	err = hw->mac.ops.stop_hw(hw);
	if (err != FM10K_ERR_REQUESTS_PENDING)
		goto skip_tx_dma_drain;

#define TX_DMA_DRAIN_RETRIES 25
	for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
		usleep_range(10000, 20000);

		/* start checking at the last ring to have pending Tx */
		for (; i < interface->num_tx_queues; i++)
			if (fm10k_get_tx_pending(interface->tx_ring[i], false))
				break;

		/* if all the queues are drained, we can break now */
		if (i == interface->num_tx_queues)
			break;
	}

	if (count >= TX_DMA_DRAIN_RETRIES)
		dev_err(&interface->pdev->dev,
			"Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
			count);

skip_tx_dma_drain:
	/* Disable DMA engine for Tx/Rx */
	err = hw->mac.ops.stop_hw(hw);
	if (err == FM10K_ERR_REQUESTS_PENDING)
		dev_err(&interface->pdev->dev,
			"hw was not shut down gracefully due to pending requests\n");
	else if (err)
		dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);

	/* free any buffers still on the rings */
	fm10k_clean_all_tx_rings(interface);
	fm10k_clean_all_rx_rings(interface);
}

/**
 * fm10k_sw_init - Initialize general software structures
 * @interface: host interface private structure to initialize
 * @ent: PCI device ID entry
 *
 * fm10k_sw_init initializes the interface private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int fm10k_sw_init(struct fm10k_intfc *interface,
			 const struct pci_device_id *ent)
{
	const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
	struct fm10k_hw *hw = &interface->hw;
	struct pci_dev *pdev = interface->pdev;
	struct net_device *netdev = interface->netdev;
	u32 rss_key[FM10K_RSSRK_SIZE];
	unsigned int rss;
	int err;

	/* initialize back pointer */
	hw->back = interface;
	hw->hw_addr = interface->uc_addr;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = fi->mac;

	/* Setup IOV handlers */
	if (fi->iov_ops)
		memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops));

	/* Set common capability flags and settings */
	rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
	interface->ring_feature[RING_F_RSS].limit = rss;
	fi->get_invariants(hw);

	/* pick up the PCIe bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	/* limit the usable DMA range */
	if (hw->mac.ops.set_dma_mask)
		hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));

	/* update netdev with DMA restrictions */
	if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
		return err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
		return err;
	}

	/* initialize hardware statistics */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* Set upper limit on IOV VFs that can be allocated */
	pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs);

	/* Start with random Ethernet address */
	eth_random_addr(hw->mac.addr);

	/* Initialize MAC address from hardware */
	err = hw->mac.ops.read_mac_addr(hw);
	if (err) {
		dev_warn(&pdev->dev,
			 "Failed to obtain MAC address, defaulting to random\n");
		/* tag address assignment as random */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	ether_addr_copy(netdev->dev_addr, hw->mac.addr);
	ether_addr_copy(netdev->perm_addr, hw->mac.addr);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		return -EIO;
	}

	/* initialize DCBNL interface */
	fm10k_dcbnl_set_ops(netdev);

	/* set default ring sizes */
	interface->tx_ring_count = FM10K_DEFAULT_TXD;
	interface->rx_ring_count = FM10K_DEFAULT_RXD;

	/* set default interrupt moderation */
	interface->tx_itr = FM10K_TX_ITR_DEFAULT;
	interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;

	/* initialize udp port lists */
	INIT_LIST_HEAD(&interface->vxlan_port);
	INIT_LIST_HEAD(&interface->geneve_port);

	/* Initialize the MAC/VLAN queue */
	INIT_LIST_HEAD(&interface->macvlan_requests);

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	memcpy(interface->rssrk, rss_key, sizeof(rss_key));

	/* Initialize the mailbox lock */
	spin_lock_init(&interface->mbx_lock);
	spin_lock_init(&interface->macvlan_lock);

	/* Start off interface as being down */
	set_bit(__FM10K_DOWN, interface->state);
	set_bit(__FM10K_UPDATING_STATS, interface->state);

	return 0;
}

/**
 * fm10k_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in fm10k_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * fm10k_probe initializes an interface identified by a pci_dev structure.
 * The OS initialization, configuring of the interface private structure,
 * and a hardware reset occur.
 **/
static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct fm10k_intfc *interface;
	int err;

	if (pdev->error_state != pci_channel_io_normal) {
		dev_err(&pdev->dev,
			"PCI device still in an error state. Unable to load...\n");
		return -EIO;
	}

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"PCI enable device failed: %d\n", err);
		return err;
	}
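
	/* the host interface supports up to 48 bits of DMA addressing; fall
	 * back to a 32-bit mask if the platform cannot satisfy that
	 */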
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: %d\n", err);
		goto err_dma;
	}

	err = pci_request_mem_regions(pdev, fm10k_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_mem_regions failed: %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_netdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	interface = netdev_priv(netdev);
	pci_set_drvdata(pdev, interface);

	interface->netdev = netdev;
	interface->pdev = pdev;

	interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
				     FM10K_UC_ADDR_SIZE);
	if (!interface->uc_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	err = fm10k_sw_init(interface, ent);
	if (err)
		goto err_sw_init;

	/* enable debugfs support */
	fm10k_dbg_intfc_init(interface);

	err = fm10k_init_queueing_scheme(interface);
	if (err)
		goto err_sw_init;

	/* the mbx interrupt might attempt to schedule the service task, so we
	 * must ensure it is disabled since we haven't yet requested the timer
	 * or work item.
	 */
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);

	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_interrupt;

	/* final check of hardware state before registering the interface */
	err = fm10k_hw_ready(interface);
	if (err)
		goto err_register;

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* stop all the transmit queues from transmitting until link is up */
	netif_tx_stop_all_queues(netdev);

	/* Initialize service timer and service task late in order to avoid
	 * cleanup issues.
	 */
	timer_setup(&interface->service_timer, fm10k_service_timer, 0);
	INIT_WORK(&interface->service_task, fm10k_service_task);

	/* Setup the MAC/VLAN queue */
	INIT_DELAYED_WORK(&interface->macvlan_task, fm10k_macvlan_task);

	/* kick off service timer now, even when interface is down */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	/* print warning for non-optimal configurations */
	pcie_print_link_status(interface->pdev);

	/* report MAC address for logging */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);

	/* enable SR-IOV after registering netdev to enforce PF/VF ordering */
	fm10k_iov_configure(pdev, 0);

	/* clear the service task disable bit and kick off service task */
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);

	return 0;

err_register:
	fm10k_mbx_free_irq(interface);
err_mbx_interrupt:
	fm10k_clear_queueing_scheme(interface);
err_sw_init:
	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);

	return err;
}

/**
 * fm10k_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * fm10k_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	del_timer_sync(&interface->service_timer);

	fm10k_stop_service_event(interface);
	fm10k_stop_macvlan_task(interface);

	/* Remove all pending MAC/VLAN requests */
	fm10k_clear_macvlan_queue(interface, interface->glort, true);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	/* release VFs */
	fm10k_iov_disable(pdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* remove any debugfs interfaces */
	fm10k_dbg_intfc_exit(interface);

	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
{
	/* the watchdog task reads from registers, which might appear like
	 * a surprise remove if the PCIe device is disabled while we're
	 * stopped. We stop the watchdog task until after we resume software
	 * activity.
	 *
	 * Note that the MAC/VLAN task will be stopped as part of preparing
	 * for reset so we don't need to handle it here.
	 */
	fm10k_stop_service_event(interface);

	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_SUSPENDED, interface->state);
}

static int fm10k_handle_resume(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Even if we didn't properly prepare for reset in
	 * fm10k_prepare_suspend, we'll attempt to resume anyway.
	 */
	if (!test_and_clear_bit(__FM10K_RESET_SUSPENDED, interface->state))
		dev_warn(&interface->pdev->dev,
			 "Device was shut down as part of suspend... Attempting to recover\n");

	/* reset statistics starting values */
	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);

	err = fm10k_handle_reset(interface);
	if (err)
		return err;

	/* assume host is not ready, to prevent race with watchdog in case we
	 * actually don't have connection to the switch
	 */
	interface->host_ready = false;
	fm10k_watchdog_host_not_ready(interface);

	/* force link to stay down for a second to prevent link flutter */
	interface->link_down_event = jiffies + (HZ);
	set_bit(__FM10K_LINK_DOWN, interface->state);

	/* restart the service task */
	fm10k_start_service_event(interface);

	/* Restart the MAC/VLAN request queue in case of outstanding events */
	fm10k_macvlan_schedule(interface);

	return err;
}

/**
 * fm10k_resume - Generic PM resume hook
 * @dev: generic device structure
 *
 * Generic PM hook used when waking the device from a low power state after
 * suspend or hibernation. This function does not need to handle lower PCIe
 * device state as the stack takes care of that for us.
 **/
static int __maybe_unused fm10k_resume(struct device *dev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* refresh hw_addr in case it was dropped */
	hw->hw_addr = interface->uc_addr;

	err = fm10k_handle_resume(interface);
	if (err)
		return err;

	netif_device_attach(netdev);

	return 0;
}

/**
 * fm10k_suspend - Generic PM suspend hook
 * @dev: generic device structure
 *
 * Generic PM hook used when setting the device into a low power state for
 * system suspend or hibernation. This function does not need to handle lower
 * PCIe device state as the stack takes care of that for us.
 **/
static int __maybe_unused fm10k_suspend(struct device *dev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = interface->netdev;

	netif_device_detach(netdev);

	fm10k_prepare_suspend(interface);

	return 0;
}

/**
 * fm10k_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	fm10k_prepare_suspend(interface);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * fm10k_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result;

	if (pci_reenable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		/* After a second error, pdev->state_saved is false; this
		 * resets it so EEH doesn't break.
		 */
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}

/**
 * fm10k_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void fm10k_io_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;
	int err;

	err = fm10k_handle_resume(interface);

	if (err)
		dev_warn(&pdev->dev,
			 "%s failed: %d\n", __func__, err);
	else
		netif_device_attach(netdev);
}

/**
 * fm10k_io_reset_prepare - called when PCI function is about to be reset
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the PCI function is about to be reset,
 * allowing the device driver to prepare for it.
 */
static void fm10k_io_reset_prepare(struct pci_dev *pdev)
{
	/* warn in case we have any active VF devices */
	if (pci_num_vf(pdev))
		dev_warn(&pdev->dev,
			 "PCIe FLR may cause issues for any active VF devices\n");

	fm10k_prepare_suspend(pci_get_drvdata(pdev));
}

/**
 * fm10k_io_reset_done - called when PCI function has finished resetting
 * @pdev: Pointer to PCI device
 *
 * This callback is called just after the PCI function is reset, such as via
 * /sys/class/net/<enpX>/device/reset or similar.
 */
static void fm10k_io_reset_done(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	int err = fm10k_handle_resume(interface);

	if (err) {
		dev_warn(&pdev->dev,
			 "%s failed: %d\n", __func__, err);
		netif_device_detach(interface->netdev);
	}
}

static const struct pci_error_handlers fm10k_err_handler = {
	.error_detected = fm10k_io_error_detected,
	.slot_reset = fm10k_io_slot_reset,
	.resume = fm10k_io_resume,
	.reset_prepare = fm10k_io_reset_prepare,
	.reset_done = fm10k_io_reset_done,
};

static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);

static struct pci_driver fm10k_driver = {
	.name			= fm10k_driver_name,
	.id_table		= fm10k_pci_tbl,
	.probe			= fm10k_probe,
	.remove			= fm10k_remove,
	.driver = {
		.pm		= &fm10k_pm_ops,
	},
	.sriov_configure	= fm10k_iov_configure,
	.err_handler		= &fm10k_err_handler
};

/**
 * fm10k_register_pci_driver - register driver interface
 *
 * This function is called on module load in order to register the driver.
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}

/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}
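
/* Usage sketch: these entry points are typically wired up from the module
 * init/exit path in fm10k_main.c, roughly:
 *
 *	static int __init fm10k_init_module(void)
 *	{
 *		return fm10k_register_pci_driver();
 *	}
 *	module_init(fm10k_init_module);
 *
 *	static void __exit fm10k_exit_module(void)
 *	{
 *		fm10k_unregister_pci_driver();
 *	}
 *	module_exit(fm10k_exit_module);
 */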