ixgb_main.c 62 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369
  1. /*******************************************************************************
  2. Intel PRO/10GbE Linux driver
  3. Copyright(c) 1999 - 2008 Intel Corporation.
  4. This program is free software; you can redistribute it and/or modify it
  5. under the terms and conditions of the GNU General Public License,
  6. version 2, as published by the Free Software Foundation.
  7. This program is distributed in the hope it will be useful, but WITHOUT
  8. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  9. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  10. more details.
  11. You should have received a copy of the GNU General Public License along with
  12. this program; if not, write to the Free Software Foundation, Inc.,
  13. 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  14. The full GNU General Public License is included in this distribution in
  15. the file called "COPYING".
  16. Contact Information:
  17. Linux NICS <linux.nics@intel.com>
  18. e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  19. Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  20. *******************************************************************************/
/* Prefix all pr_*() log output with the module name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/prefetch.h>
#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";

/* Rx packets at or below copybreak bytes are copied into a small fresh
 * buffer on receive (runtime-tunable via sysfs, mode 0644). */
#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgb_pci_tbl[] = {
	/* all supported parts are 82597EX variants (copper/CX4/SR/LR) */
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
/* Local Function Prototypes */

/* module load/unload */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);

/* PCI probe/remove and adapter setup */
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);

/* net_device_ops entry points and ring management */
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);

/* interrupt and NAPI processing */
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

/* Tx hang recovery */
static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);

/* VLAN offload handling */
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid);
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif
/* PCI AER (Advanced Error Reporting) recovery hooks */
static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
						enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
static void ixgb_io_resume (struct pci_dev *pdev);

static const struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};

/* binds the driver to the PCI IDs in ixgb_pci_tbl */
static struct pci_driver ixgb_driver = {
	.name = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe = ixgb_probe,
	.remove = ixgb_remove,
	.err_handler = &ixgb_err_handler
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* default netif message level; debug = -1 keeps this default */
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init
ixgb_init_module(void)
{
	pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
	pr_info("%s\n", ixgb_copyright);

	/* probing of individual devices happens via ixgb_probe() */
	return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);
/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.  Unregistering triggers ixgb_remove() for each
 * bound device.
 **/
static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);
/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	/* mask every interrupt cause, then flush the posted write so the
	 * mask takes effect before waiting on in-flight handlers */
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	/* wait for any handler already running on another CPU to finish */
	synchronize_irq(adapter->pdev->irq);
}
  155. /**
  156. * ixgb_irq_enable - Enable default interrupt generation settings
  157. * @adapter: board private structure
  158. **/
  159. static void
  160. ixgb_irq_enable(struct ixgb_adapter *adapter)
  161. {
  162. u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
  163. IXGB_INT_TXDW | IXGB_INT_LSC;
  164. if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
  165. val |= IXGB_INT_GPI0;
  166. IXGB_WRITE_REG(&adapter->hw, IMS, val);
  167. IXGB_WRITE_FLUSH(&adapter->hw);
  168. }
/**
 * ixgb_up - bring the interface fully operational after reset/open
 * @adapter: board private structure
 *
 * Reprograms the hardware (address filters, VLANs, Tx/Rx units),
 * requests the interrupt line (MSI when in PCI-X mode), restores the
 * frame-size/jumbo settings, then enables NAPI, interrupts, the Tx
 * queue and the watchdog.  Returns 0 on success or the negative errno
 * from request_irq() on failure.
 **/
int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err, irq_flags = IRQF_SHARED;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	ixgb_rar_set(hw, netdev->dev_addr, 0);
	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/* only enable MSI if bus is in PCI-X mode */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->have_msi = true;
			irq_flags = 0; /* MSI does not need IRQF_SHARED */
		}
		/* proceed to try to request regular interrupt */
	}

	err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
			  netdev->name, netdev);
	if (err) {
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate interrupt Error: %d\n", err);
		return err;
	}

	/* re-program MFS if the MTU changed or the register disagrees
	 * with our cached max_frame_size */
	if ((hw->max_frame_size != max_frame) ||
	    (hw->max_frame_size !=
	     (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		/* frames larger than standard Ethernet need jumbo frame
		 * enable (JFE) set in CTRL0 */
		if (hw->max_frame_size >
		    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}

	/* clear the DOWN flag before enabling interrupt sources so the
	 * handler and watchdog are allowed to run */
	clear_bit(__IXGB_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);
	ixgb_irq_enable(adapter);

	netif_wake_queue(netdev);

	mod_timer(&adapter->watchdog_timer, jiffies);

	return 0;
}
/**
 * ixgb_down - quiesce the interface and release its interrupt
 * @adapter: board private structure
 * @kill_watchdog: true to synchronously stop the watchdog timer as well
 *
 * Counterpart to ixgb_up(): sets __IXGB_DOWN first so concurrent paths
 * stop rearming the watchdog, disables NAPI/IRQs, frees the interrupt
 * line, resets the MAC and empties both descriptor rings.
 **/
void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	/* prevent the interrupt handler from restarting watchdog */
	set_bit(__IXGB_DOWN, &adapter->flags);

	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);
	/* waiting for NAPI to complete can re-enable interrupts */
	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);

	if (kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}
/**
 * ixgb_reset - reset and re-initialize the MAC
 * @adapter: board private structure
 *
 * Stops the adapter, runs the hardware init sequence, then restores
 * the frame-size (MFS) and jumbo-frame (CTRL0.JFE) settings that the
 * reset clobbers.
 **/
void
ixgb_reset(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;

	ixgb_adapter_stop(hw);
	if (!ixgb_init_hw(hw))
		netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

	/* restore frame size information */
	IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
	if (hw->max_frame_size >
	    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
		u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
		if (!(ctrl0 & IXGB_CTRL0_JFE)) {
			ctrl0 |= IXGB_CTRL0_JFE;
			IXGB_WRITE_REG(hw, CTRL0, ctrl0);
		}
	}
}
  265. static netdev_features_t
  266. ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
  267. {
  268. /*
  269. * Tx VLAN insertion does not work per HW design when Rx stripping is
  270. * disabled.
  271. */
  272. if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
  273. features &= ~NETIF_F_HW_VLAN_CTAG_TX;
  274. return features;
  275. }
  276. static int
  277. ixgb_set_features(struct net_device *netdev, netdev_features_t features)
  278. {
  279. struct ixgb_adapter *adapter = netdev_priv(netdev);
  280. netdev_features_t changed = features ^ netdev->features;
  281. if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
  282. return 0;
  283. adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
  284. if (netif_running(netdev)) {
  285. ixgb_down(adapter, true);
  286. ixgb_up(adapter);
  287. ixgb_set_speed_duplex(netdev);
  288. } else
  289. ixgb_reset(adapter);
  290. return 0;
  291. }
/* net_device_ops - stack entry points for this driver */
static const struct net_device_ops ixgb_netdev_ops = {
	.ndo_open		= ixgb_open,
	.ndo_stop		= ixgb_close,
	.ndo_start_xmit		= ixgb_xmit_frame,
	.ndo_get_stats		= ixgb_get_stats,
	.ndo_set_rx_mode	= ixgb_set_multi,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgb_set_mac,
	.ndo_change_mtu		= ixgb_change_mtu,
	.ndo_tx_timeout		= ixgb_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgb_netpoll,
#endif
	.ndo_fix_features	= ixgb_fix_features,
	.ndo_set_features	= ixgb_set_features,
};
/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0; /* monotonically increasing board index */
	int pci_using_dac;
	int i;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* prefer 64-bit DMA; fall back to 32-bit, abort if neither works */
	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_dma_mask;
		}
	}

	err = pci_request_regions(pdev, ixgb_driver_name);
	if (err)
		goto err_request_regions;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* map the MMIO register BAR */
	adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	/* find the first populated I/O-port BAR, if any */
	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->netdev_ops = &ixgb_netdev_ops;
	ixgb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

	/* temporary name until register_netdev() assigns eth%d */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */
	err = ixgb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->hw_features = NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX;
	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features |= NETIF_F_RXCSUM;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* make sure the EEPROM is good */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgb_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	netif_info(adapter, probe, adapter->netdev,
		   "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */
	ixgb_reset(adapter);

	cards_found++;
	return 0;

/* unwind in reverse order of acquisition */
err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.  Releases resources in the reverse order ixgb_probe()
 * acquired them.
 **/
static void
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* flush any pending Tx-timeout work before tearing down */
	cancel_work_sync(&adapter->tx_timeout_task);

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
	pci_disable_device(pdev);
}
  462. /**
  463. * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
  464. * @adapter: board private structure to initialize
  465. *
  466. * ixgb_sw_init initializes the Adapter private data structure.
  467. * Fields are initialized based on PCI device information and
  468. * OS network device settings (MTU size).
  469. **/
  470. static int
  471. ixgb_sw_init(struct ixgb_adapter *adapter)
  472. {
  473. struct ixgb_hw *hw = &adapter->hw;
  474. struct net_device *netdev = adapter->netdev;
  475. struct pci_dev *pdev = adapter->pdev;
  476. /* PCI config space info */
  477. hw->vendor_id = pdev->vendor;
  478. hw->device_id = pdev->device;
  479. hw->subsystem_vendor_id = pdev->subsystem_vendor;
  480. hw->subsystem_id = pdev->subsystem_device;
  481. hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
  482. adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
  483. if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
  484. (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
  485. (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
  486. (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
  487. hw->mac_type = ixgb_82597;
  488. else {
  489. /* should never have loaded on this device */
  490. netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
  491. }
  492. /* enable flow control to be programmed */
  493. hw->fc.send_xon = 1;
  494. set_bit(__IXGB_DOWN, &adapter->flags);
  495. return 0;
  496. }
  497. /**
  498. * ixgb_open - Called when a network interface is made active
  499. * @netdev: network interface device structure
  500. *
  501. * Returns 0 on success, negative value on failure
  502. *
  503. * The open entry point is called when a network interface is made
  504. * active by the system (IFF_UP). At this point all resources needed
  505. * for transmit and receive operations are allocated, the interrupt
  506. * handler is registered with the OS, the watchdog timer is started,
  507. * and the stack is notified that the interface is ready.
  508. **/
  509. static int
  510. ixgb_open(struct net_device *netdev)
  511. {
  512. struct ixgb_adapter *adapter = netdev_priv(netdev);
  513. int err;
  514. /* allocate transmit descriptors */
  515. err = ixgb_setup_tx_resources(adapter);
  516. if (err)
  517. goto err_setup_tx;
  518. netif_carrier_off(netdev);
  519. /* allocate receive descriptors */
  520. err = ixgb_setup_rx_resources(adapter);
  521. if (err)
  522. goto err_setup_rx;
  523. err = ixgb_up(adapter);
  524. if (err)
  525. goto err_up;
  526. netif_start_queue(netdev);
  527. return 0;
  528. err_up:
  529. ixgb_free_rx_resources(adapter);
  530. err_setup_rx:
  531. ixgb_free_tx_resources(adapter);
  532. err_setup_tx:
  533. ixgb_reset(adapter);
  534. return err;
  535. }
/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* true: also stop the watchdog timer synchronously */
	ixgb_down(adapter, true);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}
  556. /**
  557. * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
  558. * @adapter: board private structure
  559. *
  560. * Return 0 on success, negative on failure
  561. **/
  562. int
  563. ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
  564. {
  565. struct ixgb_desc_ring *txdr = &adapter->tx_ring;
  566. struct pci_dev *pdev = adapter->pdev;
  567. int size;
  568. size = sizeof(struct ixgb_buffer) * txdr->count;
  569. txdr->buffer_info = vzalloc(size);
  570. if (!txdr->buffer_info)
  571. return -ENOMEM;
  572. /* round up to nearest 4K */
  573. txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
  574. txdr->size = ALIGN(txdr->size, 4096);
  575. txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
  576. GFP_KERNEL);
  577. if (!txdr->desc) {
  578. vfree(txdr->buffer_info);
  579. return -ENOMEM;
  580. }
  581. txdr->next_to_use = 0;
  582. txdr->next_to_clean = 0;
  583. return 0;
  584. }
/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	u64 tdba = adapter->tx_ring.dma;
	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	u32 tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */
	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */
	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */
	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE |
		(adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}
  619. /**
  620. * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
  621. * @adapter: board private structure
  622. *
  623. * Returns 0 on success, negative on failure
  624. **/
  625. int
  626. ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
  627. {
  628. struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
  629. struct pci_dev *pdev = adapter->pdev;
  630. int size;
  631. size = sizeof(struct ixgb_buffer) * rxdr->count;
  632. rxdr->buffer_info = vzalloc(size);
  633. if (!rxdr->buffer_info)
  634. return -ENOMEM;
  635. /* Round up to nearest 4K */
  636. rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
  637. rxdr->size = ALIGN(rxdr->size, 4096);
  638. rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
  639. GFP_KERNEL);
  640. if (!rxdr->desc) {
  641. vfree(rxdr->buffer_info);
  642. return -ENOMEM;
  643. }
  644. memset(rxdr->desc, 0, rxdr->size);
  645. rxdr->next_to_clean = 0;
  646. rxdr->next_to_use = 0;
  647. return 0;
  648. }
/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/
static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
        u32 rctl;

        rctl = IXGB_READ_REG(&adapter->hw, RCTL);

        /* clear the multicast-offset field before inserting the filter type */
        rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

        rctl |=
                IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
                IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
                (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

        rctl |= IXGB_RCTL_SECRC;

        /* select the smallest hardware buffer size that can hold the
         * current rx_buffer_len */
        if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
                rctl |= IXGB_RCTL_BSIZE_2048;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
                rctl |= IXGB_RCTL_BSIZE_4096;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
                rctl |= IXGB_RCTL_BSIZE_8192;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
                rctl |= IXGB_RCTL_BSIZE_16384;

        IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}
/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
        u64 rdba = adapter->rx_ring.dma;
        u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
        struct ixgb_hw *hw = &adapter->hw;
        u32 rctl;
        u32 rxcsum;

        /* make sure receives are disabled while setting up the descriptors */
        rctl = IXGB_READ_REG(hw, RCTL);
        IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

        /* set the Receive Delay Timer Register */
        IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

        /* Setup the Base and Length of the Rx Descriptor Ring
         * (rdba can be a 32 or 64 bit value, split across two registers) */
        IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
        IXGB_WRITE_REG(hw, RDLEN, rdlen);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGB_WRITE_REG(hw, RDH, 0);
        IXGB_WRITE_REG(hw, RDT, 0);

        /* due to the hardware errata with RXDCTL, we are unable to use any of
         * the performance enhancing features of it without causing other
         * subtle bugs, some of the bugs could include receive length
         * corruption at high data rates (WTHRESH > 0) and/or receive
         * descriptor ring irregularites (particularly in hardware cache) */
        IXGB_WRITE_REG(hw, RXDCTL, 0);

        /* Enable Receive Checksum Offload for TCP and UDP */
        if (adapter->rx_csum) {
                rxcsum = IXGB_READ_REG(hw, RXCSUM);
                rxcsum |= IXGB_RXCSUM_TUOFL;
                IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
        }

        /* Enable Receives by restoring the original RCTL value */
        IXGB_WRITE_REG(hw, RCTL, rctl);
}
  715. /**
  716. * ixgb_free_tx_resources - Free Tx Resources
  717. * @adapter: board private structure
  718. *
  719. * Free all transmit software resources
  720. **/
  721. void
  722. ixgb_free_tx_resources(struct ixgb_adapter *adapter)
  723. {
  724. struct pci_dev *pdev = adapter->pdev;
  725. ixgb_clean_tx_ring(adapter);
  726. vfree(adapter->tx_ring.buffer_info);
  727. adapter->tx_ring.buffer_info = NULL;
  728. dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
  729. adapter->tx_ring.desc, adapter->tx_ring.dma);
  730. adapter->tx_ring.desc = NULL;
  731. }
/* Release the DMA mapping and skb (if any) held by one Tx buffer_info
 * slot.  Fragments are unmapped as pages, the skb's linear data with
 * dma_unmap_single(), mirroring how ixgb_tx_map() mapped them. */
static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
                                struct ixgb_buffer *buffer_info)
{
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
                                       buffer_info->length, DMA_TO_DEVICE);
                else
                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
                                         buffer_info->length, DMA_TO_DEVICE);
                buffer_info->dma = 0;
        }

        if (buffer_info->skb) {
                /* _any variant: may be called from interrupt context */
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
        /* these fields must always be initialized in tx
         * buffer_info->length = 0;
         * buffer_info->next_to_watch = 0; */
}
/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs and their DMA mappings */
        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        /* reset all software bookkeeping for the ring */
        size = sizeof(struct ixgb_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        /* bring the hardware head/tail pointers back in sync */
        IXGB_WRITE_REG(&adapter->hw, TDH, 0);
        IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}
/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;

        /* unmap/free any buffers still attached to the ring first */
        ixgb_clean_rx_ring(adapter);

        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;

        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
                          rx_ring->dma);

        rx_ring->desc = NULL;
}
/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/
static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct ixgb_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs and their DMA mappings */
        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
                        dma_unmap_single(&pdev->dev,
                                         buffer_info->dma,
                                         buffer_info->length,
                                         DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                        buffer_info->length = 0;
                }

                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        /* reset all software bookkeeping for the ring */
        size = sizeof(struct ixgb_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        /* bring the hardware head/tail pointers back in sync */
        IXGB_WRITE_REG(&adapter->hw, RDH, 0);
        IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}
  834. /**
  835. * ixgb_set_mac - Change the Ethernet Address of the NIC
  836. * @netdev: network interface device structure
  837. * @p: pointer to an address structure
  838. *
  839. * Returns 0 on success, negative on failure
  840. **/
  841. static int
  842. ixgb_set_mac(struct net_device *netdev, void *p)
  843. {
  844. struct ixgb_adapter *adapter = netdev_priv(netdev);
  845. struct sockaddr *addr = p;
  846. if (!is_valid_ether_addr(addr->sa_data))
  847. return -EADDRNOTAVAIL;
  848. memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  849. ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
  850. return 0;
  851. }
  852. /**
  853. * ixgb_set_multi - Multicast and Promiscuous mode set
  854. * @netdev: network interface device structure
  855. *
  856. * The set_multi entry point is called whenever the multicast address
  857. * list or the network interface flags are updated. This routine is
  858. * responsible for configuring the hardware for proper multicast,
  859. * promiscuous mode, and all-multi behavior.
  860. **/
  861. static void
  862. ixgb_set_multi(struct net_device *netdev)
  863. {
  864. struct ixgb_adapter *adapter = netdev_priv(netdev);
  865. struct ixgb_hw *hw = &adapter->hw;
  866. struct netdev_hw_addr *ha;
  867. u32 rctl;
  868. /* Check for Promiscuous and All Multicast modes */
  869. rctl = IXGB_READ_REG(hw, RCTL);
  870. if (netdev->flags & IFF_PROMISC) {
  871. rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
  872. /* disable VLAN filtering */
  873. rctl &= ~IXGB_RCTL_CFIEN;
  874. rctl &= ~IXGB_RCTL_VFE;
  875. } else {
  876. if (netdev->flags & IFF_ALLMULTI) {
  877. rctl |= IXGB_RCTL_MPE;
  878. rctl &= ~IXGB_RCTL_UPE;
  879. } else {
  880. rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
  881. }
  882. /* enable VLAN filtering */
  883. rctl |= IXGB_RCTL_VFE;
  884. rctl &= ~IXGB_RCTL_CFIEN;
  885. }
  886. if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
  887. rctl |= IXGB_RCTL_MPE;
  888. IXGB_WRITE_REG(hw, RCTL, rctl);
  889. } else {
  890. u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES *
  891. ETH_ALEN, GFP_ATOMIC);
  892. u8 *addr;
  893. if (!mta)
  894. goto alloc_failed;
  895. IXGB_WRITE_REG(hw, RCTL, rctl);
  896. addr = mta;
  897. netdev_for_each_mc_addr(ha, netdev) {
  898. memcpy(addr, ha->addr, ETH_ALEN);
  899. addr += ETH_ALEN;
  900. }
  901. ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
  902. kfree(mta);
  903. }
  904. alloc_failed:
  905. if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
  906. ixgb_vlan_strip_enable(adapter);
  907. else
  908. ixgb_vlan_strip_disable(adapter);
  909. }
/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 *
 * Runs every 2 seconds: tracks link state, refreshes statistics,
 * and kicks off a reset if Tx work is stranded with the link down.
 **/
static void
ixgb_watchdog(unsigned long data)
{
        struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
        struct net_device *netdev = adapter->netdev;
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;

        ixgb_check_for_link(&adapter->hw);

        if (ixgb_check_for_bad_link(&adapter->hw)) {
                /* force the reset path */
                netif_stop_queue(netdev);
        }

        if (adapter->hw.link_up) {
                if (!netif_carrier_ok(netdev)) {
                        /* link just came up: report speed and the
                         * negotiated flow-control mode */
                        netdev_info(netdev,
                                    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
                                    (adapter->hw.fc.type == ixgb_fc_full) ?
                                    "RX/TX" :
                                    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
                                    "RX" :
                                    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
                                    "TX" : "None");
                        adapter->link_speed = 10000;
                        adapter->link_duplex = FULL_DUPLEX;
                        netif_carrier_on(netdev);
                }
        } else {
                if (netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
                        netdev_info(netdev, "NIC Link is Down\n");
                        netif_carrier_off(netdev);
                }
        }

        ixgb_update_stats(adapter);

        if (!netif_carrier_ok(netdev)) {
                if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context). */
                        schedule_work(&adapter->tx_timeout_task);
                        /* return immediately since reset is imminent */
                        return;
                }
        }

        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = true;

        /* generate an interrupt to force clean up of any stragglers */
        IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

        /* Reset the timer */
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
  966. #define IXGB_TX_FLAGS_CSUM 0x00000001
  967. #define IXGB_TX_FLAGS_VLAN 0x00000002
  968. #define IXGB_TX_FLAGS_TSO 0x00000004
/* ixgb_tso - set up a TSO context descriptor for a GSO skb.
 *
 * Returns 1 when a context descriptor was queued, 0 when the skb does
 * not need TSO, or a negative errno from skb_cow_head(). */
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
        u16 ipcse, tucse, mss;

        if (likely(skb_is_gso(skb))) {
                struct ixgb_buffer *buffer_info;
                struct iphdr *iph;
                int err;

                /* headers must be writable for the pseudo-header fixups */
                err = skb_cow_head(skb, 0);
                if (err < 0)
                        return err;

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                mss = skb_shinfo(skb)->gso_size;

                /* zero IP total length and checksums; prime the TCP
                 * checksum with the pseudo-header so hardware can
                 * finish it per segment */
                iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP, 0);

                /* byte offsets of the IP/TCP checksum fields and header
                 * boundaries, as the context descriptor expects them */
                ipcss = skb_network_offset(skb);
                ipcso = (void *)&(iph->check) - (void *)skb->data;
                ipcse = skb_transport_offset(skb) - 1;
                tucss = skb_transport_offset(skb);
                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
                tucse = 0;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->ipcss = ipcss;
                context_desc->ipcso = ipcso;
                context_desc->ipcse = cpu_to_le16(ipcse);
                context_desc->tucss = tucss;
                context_desc->tucso = tucso;
                context_desc->tucse = cpu_to_le16(tucse);
                context_desc->mss = cpu_to_le16(mss);
                context_desc->hdr_len = hdr_len;
                context_desc->status = 0;
                context_desc->cmd_type_len = cpu_to_le32(
                                                 IXGB_CONTEXT_DESC_TYPE
                                                 | IXGB_CONTEXT_DESC_CMD_TSE
                                                 | IXGB_CONTEXT_DESC_CMD_IP
                                                 | IXGB_CONTEXT_DESC_CMD_TCP
                                                 | IXGB_CONTEXT_DESC_CMD_IDE
                                                 | (skb->len - (hdr_len)));

                /* advance next_to_use, wrapping around the ring */
                if (++i == adapter->tx_ring.count) i = 0;
                adapter->tx_ring.next_to_use = i;

                return 1;
        }

        return 0;
}
/* ixgb_tx_csum - set up a checksum-offload context descriptor.
 *
 * Returns true when a context descriptor was queued (skb requested
 * partial checksum offload), false otherwise. */
static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 css, cso;

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                struct ixgb_buffer *buffer_info;

                /* where checksumming starts and where the result goes */
                css = skb_checksum_start_offset(skb);
                cso = css + skb->csum_offset;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->tucss = css;
                context_desc->tucso = cso;
                context_desc->tucse = 0;
                /* zero out any previously existing data in one instruction */
                *(u32 *)&(context_desc->ipcss) = 0;
                context_desc->status = 0;
                context_desc->hdr_len = 0;
                context_desc->mss = 0;
                context_desc->cmd_type_len =
                        cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
                                    | IXGB_TX_DESC_CMD_IDE);

                /* advance next_to_use, wrapping around the ring */
                if (++i == adapter->tx_ring.count) i = 0;
                adapter->tx_ring.next_to_use = i;

                return true;
        }

        return false;
}
  1054. #define IXGB_MAX_TXD_PWR 14
  1055. #define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
/* ixgb_tx_map - DMA-map an skb onto the Tx ring.
 *
 * Splits the linear data and each page fragment into chunks of at most
 * IXGB_MAX_DATA_PER_TXD bytes, one buffer_info slot per chunk, starting
 * at next_to_use.  Returns the number of slots consumed, or 0 on DMA
 * mapping failure (after unwinding everything mapped so far). */
static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
            unsigned int first)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        struct ixgb_buffer *buffer_info;
        int len = skb_headlen(skb);
        unsigned int offset = 0, size, count = 0, i;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;

        i = tx_ring->next_to_use;

        /* map the linear portion of the skb */
        while (len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, IXGB_MAX_DATA_PER_TXD);
                /* Workaround for premature desc write-backs
                 * in TSO mode.  Append 4-byte sentinel desc */
                if (unlikely(mss && !nr_frags && size == len && size > 8))
                        size -= 4;

                buffer_info->length = size;
                WARN_ON(buffer_info->dma != 0);
                buffer_info->time_stamp = jiffies;
                buffer_info->mapped_as_page = false;
                buffer_info->dma = dma_map_single(&pdev->dev,
                                                  skb->data + offset,
                                                  size, DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                        goto dma_error;
                buffer_info->next_to_watch = 0;

                len -= size;
                offset += size;
                count++;
                if (len) {
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }
        }

        /* map each page fragment */
        for (f = 0; f < nr_frags; f++) {
                const struct skb_frag_struct *frag;

                frag = &skb_shinfo(skb)->frags[f];
                len = skb_frag_size(frag);
                offset = 0;

                while (len) {
                        i++;
                        if (i == tx_ring->count)
                                i = 0;

                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, IXGB_MAX_DATA_PER_TXD);

                        /* Workaround for premature desc write-backs
                         * in TSO mode.  Append 4-byte sentinel desc */
                        if (unlikely(mss && (f == (nr_frags - 1))
                                     && size == len && size > 8))
                                size -= 4;

                        buffer_info->length = size;
                        buffer_info->time_stamp = jiffies;
                        buffer_info->mapped_as_page = true;
                        buffer_info->dma =
                                skb_frag_dma_map(&pdev->dev, frag, offset, size,
                                                 DMA_TO_DEVICE);
                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                                goto dma_error;
                        buffer_info->next_to_watch = 0;

                        len -= size;
                        offset += size;
                        count++;
                }
        }

        /* the last slot owns the skb; the first records which slot the
         * clean routine must watch for write-back */
        tx_ring->buffer_info[i].skb = skb;
        tx_ring->buffer_info[first].next_to_watch = i;

        return count;

dma_error:
        dev_err(&pdev->dev, "TX DMA map failed\n");
        buffer_info->dma = 0;
        /* the failed slot itself has nothing mapped */
        if (count)
                count--;

        /* walk backwards, unmapping every slot mapped so far */
        while (count--) {
                if (i == 0)
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        return 0;
}
/* ixgb_tx_queue - write Tx descriptors for the slots prepared by
 * ixgb_tx_map() and hand them to the hardware by bumping the tail.
 *
 * @count: number of mapped slots, @vlan_id: VLAN tag to insert when
 * IXGB_TX_FLAGS_VLAN is set, @tx_flags: IXGB_TX_FLAGS_* bits. */
static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_tx_desc *tx_desc = NULL;
        struct ixgb_buffer *buffer_info;
        u32 cmd_type_len = adapter->tx_cmd_type;
        u8 status = 0;
        u8 popts = 0;
        unsigned int i;

        /* translate flags into per-descriptor command/option bits */
        if (tx_flags & IXGB_TX_FLAGS_TSO) {
                cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
                popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
        }

        if (tx_flags & IXGB_TX_FLAGS_CSUM)
                popts |= IXGB_TX_DESC_POPTS_TXSM;

        if (tx_flags & IXGB_TX_FLAGS_VLAN)
                cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

        i = tx_ring->next_to_use;

        while (count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = IXGB_TX_DESC(*tx_ring, i);
                tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->cmd_type_len =
                        cpu_to_le32(cmd_type_len | buffer_info->length);
                tx_desc->status = status;
                tx_desc->popts = popts;
                tx_desc->vlan = cpu_to_le16(vlan_id);
                if (++i == tx_ring->count) i = 0;
        }

        /* only the final descriptor of the packet gets EOP and a
         * report-status request */
        tx_desc->cmd_type_len |=
                cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();

        tx_ring->next_to_use = i;
        IXGB_WRITE_REG(&adapter->hw, TDT, i);
}
/* Slow path of ixgb_maybe_stop_tx(): stop the queue, then re-check
 * free descriptors under the barrier in case the clean path raced
 * with us and made room.  Returns -EBUSY if the queue stays stopped,
 * 0 if it was restarted. */
static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

        netif_stop_queue(netdev);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
        smp_mb();

        /* We need to check again in a case another CPU has just
         * made room available. */
        if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! */
        netif_start_queue(netdev);
        ++adapter->restart_queue;
        return 0;
}
  1200. static int ixgb_maybe_stop_tx(struct net_device *netdev,
  1201. struct ixgb_desc_ring *tx_ring, int size)
  1202. {
  1203. if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
  1204. return 0;
  1205. return __ixgb_maybe_stop_tx(netdev, size);
  1206. }
  1207. /* Tx Descriptors needed, worst case */
  1208. #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
  1209. (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
  1210. #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->date */ + \
  1211. MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
  1212. + 1 /* one more needed for sentinel TSO workaround */
/* ixgb_xmit_frame - hard_start_xmit hook: queue one skb for transmission.
 *
 * Drops the skb while the adapter is down or the skb is empty, backs
 * off with NETDEV_TX_BUSY when the ring is too full, otherwise sets up
 * an optional TSO/checksum context, maps the data, and posts the
 * descriptors. */
static netdev_tx_t
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        unsigned int first;
        unsigned int tx_flags = 0;
        int vlan_id = 0;
        int count = 0;
        int tso;

        if (test_bit(__IXGB_DOWN, &adapter->flags)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (skb->len <= 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* worst-case descriptor requirement must be available up front */
        if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
                                        DESC_NEEDED)))
                return NETDEV_TX_BUSY;

        if (skb_vlan_tag_present(skb)) {
                tx_flags |= IXGB_TX_FLAGS_VLAN;
                vlan_id = skb_vlan_tag_get(skb);
        }

        /* remember where this packet starts so we can roll back on error */
        first = adapter->tx_ring.next_to_use;

        tso = ixgb_tso(adapter, skb);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (likely(tso))
                tx_flags |= IXGB_TX_FLAGS_TSO;
        else if (ixgb_tx_csum(adapter, skb))
                tx_flags |= IXGB_TX_FLAGS_CSUM;

        count = ixgb_tx_map(adapter, skb, first);

        if (count) {
                ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
                /* Make sure there is space in the ring for the next send. */
                ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
        } else {
                /* DMA mapping failed: drop the skb and rewind the ring
                 * past any context descriptor we queued */
                dev_kfree_skb_any(skb);
                adapter->tx_ring.buffer_info[first].time_stamp = 0;
                adapter->tx_ring.next_to_use = first;
        }

        return NETDEV_TX_OK;
}
  1259. /**
  1260. * ixgb_tx_timeout - Respond to a Tx Hang
  1261. * @netdev: network interface device structure
  1262. **/
  1263. static void
  1264. ixgb_tx_timeout(struct net_device *netdev)
  1265. {
  1266. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1267. /* Do the reset outside of interrupt context */
  1268. schedule_work(&adapter->tx_timeout_task);
  1269. }
  1270. static void
  1271. ixgb_tx_timeout_task(struct work_struct *work)
  1272. {
  1273. struct ixgb_adapter *adapter =
  1274. container_of(work, struct ixgb_adapter, tx_timeout_task);
  1275. adapter->tx_timeout_count++;
  1276. ixgb_down(adapter, true);
  1277. ixgb_up(adapter);
  1278. }
  1279. /**
  1280. * ixgb_get_stats - Get System Network Statistics
  1281. * @netdev: network interface device structure
  1282. *
  1283. * Returns the address of the device statistics structure.
  1284. * The statistics are actually updated from the timer callback.
  1285. **/
  1286. static struct net_device_stats *
  1287. ixgb_get_stats(struct net_device *netdev)
  1288. {
  1289. return &netdev->stats;
  1290. }
/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        /* frame sizes include Ethernet header and FCS on top of the MTU */
        int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

        /* MTU < 68 is an error for IPv4 traffic, just don't allow it */
        if ((new_mtu < 68) ||
            (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
                netif_err(adapter, probe, adapter->netdev,
                          "Invalid MTU setting %d\n", new_mtu);
                return -EINVAL;
        }

        /* nothing to do if the resulting frame size is unchanged */
        if (old_max_frame == max_frame)
                return 0;

        /* the Rx buffer size depends on the MTU, so the interface must
         * be bounced to re-allocate buffers when it is running */
        if (netif_running(netdev))
                ixgb_down(adapter, true);

        adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */

        netdev->mtu = new_mtu;

        if (netif_running(netdev))
                ixgb_up(adapter);

        return 0;
}
/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 *
 * Accumulates the hardware statistics registers into adapter->stats
 * and mirrors the relevant totals into the generic netdev stats.
 **/
void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        /* Prevent stats update while adapter is being reset */
        if (pci_channel_offline(pdev))
                return;

        if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
            (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
                u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
                u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
                u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
                u64 bcast = ((u64)bcast_h << 32) | bcast_l;

                multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
                /* fix up multicast stats by removing broadcasts */
                if (multi >= bcast)
                        multi -= bcast;

                adapter->stats.mprcl += (multi & 0xFFFFFFFF);
                adapter->stats.mprch += (multi >> 32);
                adapter->stats.bprcl += bcast_l;
                adapter->stats.bprch += bcast_h;
        } else {
                adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
                adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
                adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
                adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
        }

        /* receive-side hardware counters */
        adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
        adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
        adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
        adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
        adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
        adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
        adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
        adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
        adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
        adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
        adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
        adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
        adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
        adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
        adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
        adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
        adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
        adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
        adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
        adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
        adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
        adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);

        /* transmit-side hardware counters */
        adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
        adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
        adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
        adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
        adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
        adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
        adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
        adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
        adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
        adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
        adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
        adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
        adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
        adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
        adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
        adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
        adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
        adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);

        /* miscellaneous and flow-control counters */
        adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
        adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
        adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
        adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
        adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
        adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
        adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
        adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
        adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
        adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
        adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
        adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
        adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
        adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
        adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
        adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

        /* Fill out the OS statistics structure */
        netdev->stats.rx_packets = adapter->stats.gprcl;
        netdev->stats.tx_packets = adapter->stats.gptcl;
        netdev->stats.rx_bytes = adapter->stats.gorcl;
        netdev->stats.tx_bytes = adapter->stats.gotcl;
        netdev->stats.multicast = adapter->stats.mprcl;
        netdev->stats.collisions = 0;

        /* ignore RLEC as it reports errors for padded (<64bytes) frames
         * with a length in the type/len field */
        netdev->stats.rx_errors =
                /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
                adapter->stats.ruc +
                adapter->stats.roc /*+ adapter->stats.rlec */ +
                adapter->stats.icbc +
                adapter->stats.ecbc + adapter->stats.mpc;

        /* see above
         * netdev->stats.rx_length_errors = adapter->stats.rlec;
         */

        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
        netdev->stats.rx_fifo_errors = adapter->stats.mpc;
        netdev->stats.rx_missed_errors = adapter->stats.mpc;
        netdev->stats.rx_over_errors = adapter->stats.mpc;

        netdev->stats.tx_errors = 0;
        netdev->stats.rx_frame_errors = 0;
        netdev->stats.tx_aborted_errors = 0;
        netdev->stats.tx_carrier_errors = 0;
        netdev->stats.tx_fifo_errors = 0;
        netdev->stats.tx_heartbeat_errors = 0;
        netdev->stats.tx_window_errors = 0;
}
  1439. #define IXGB_MAX_INTR 10
/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Reads and thereby clears the Interrupt Cause Register.  On link or
 * receive-sequence events the watchdog is kicked so link state is
 * re-evaluated promptly.  All further work is deferred to NAPI: the
 * handler masks the device's interrupts and schedules ixgb_clean().
 **/

static irqreturn_t
ixgb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	/* reading ICR clears the pending interrupt causes */
	u32 icr = IXGB_READ_REG(hw, ICR);

	if (unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	/* link state change or rx sequence error: run the watchdog now,
	 * unless the interface is being torn down */
	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies);

	if (napi_schedule_prep(&adapter->napi)) {

		/* Disable interrupts and register for poll. The flush
		  of the posted write is intentionally left out.
		*/

		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
/**
 * ixgb_clean - NAPI Rx polling callback
 * @napi: napi struct embedded in the board private structure
 * @budget: maximum number of rx packets to process in this poll
 *
 * Reclaims completed tx descriptors, then processes up to @budget
 * received packets.  If the budget is not exhausted, polling is
 * completed and device interrupts are re-enabled (unless the
 * interface is going down).
 *
 * Returns the number of rx packets processed.
 **/

static int
ixgb_clean(struct napi_struct *napi, int budget)
{
	struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
	int work_done = 0;

	ixgb_clean_tx_irq(adapter);
	ixgb_clean_rx_irq(adapter, &work_done, budget);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			ixgb_irq_enable(adapter);
	}

	return work_done;
}
/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * Walks the tx ring from next_to_clean, and for every packet whose
 * end-of-packet descriptor has the DD (descriptor done) status bit set,
 * unmaps and frees all of that packet's buffers.  Wakes a stopped tx
 * queue once enough descriptors are free, and runs the tx-hang check
 * when armed by the watchdog (detect_tx_hung).
 *
 * Returns true if at least one packet was reclaimed.
 **/

static bool
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	/* eop indexes the last descriptor of the packet starting at i */
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);

	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		rmb(); /* read buffer_info after eop_desc */

		/* free every descriptor of this packet, up to and
		 * including the end-of-packet descriptor */
		for (cleaned = false; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			/* count packets the hardware checksummed */
			if (tx_desc->popts &
			    (IXGB_TX_DESC_POPTS_TXSM |
			     IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			/* clear status/popts/vlan with one 32-bit store */
			*(u32 *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if (++i == tx_ring->count) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean. */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		/* hung if the oldest packet is over a second old and the
		 * transmitter is not paused by flow control (TXOFF) */
		if (tx_ring->buffer_info[eop].time_stamp &&
		   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
		   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
		        IXGB_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			netif_err(adapter, drv, adapter->netdev,
				  "Detected Tx Unit Hang\n"
				  "  TDH                  <%x>\n"
				  "  TDT                  <%x>\n"
				  "  next_to_use          <%x>\n"
				  "  next_to_clean        <%x>\n"
				  "buffer_info[next_to_clean]\n"
				  "  time_stamp           <%lx>\n"
				  "  next_to_watch        <%x>\n"
				  "  jiffies              <%lx>\n"
				  "  next_to_watch.status <%x>\n",
				  IXGB_READ_REG(&adapter->hw, TDH),
				  IXGB_READ_REG(&adapter->hw, TDT),
				  tx_ring->next_to_use,
				  tx_ring->next_to_clean,
				  tx_ring->buffer_info[eop].time_stamp,
				  eop,
				  jiffies,
				  eop_desc->status);
			netif_stop_queue(netdev);
		}
	}

	return cleaned;
}
  1563. /**
  1564. * ixgb_rx_checksum - Receive Checksum Offload for 82597.
  1565. * @adapter: board private structure
  1566. * @rx_desc: receive descriptor
  1567. * @sk_buff: socket buffer with received data
  1568. **/
  1569. static void
  1570. ixgb_rx_checksum(struct ixgb_adapter *adapter,
  1571. struct ixgb_rx_desc *rx_desc,
  1572. struct sk_buff *skb)
  1573. {
  1574. /* Ignore Checksum bit is set OR
  1575. * TCP Checksum has not been calculated
  1576. */
  1577. if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
  1578. (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
  1579. skb_checksum_none_assert(skb);
  1580. return;
  1581. }
  1582. /* At this point we know the hardware did the TCP checksum */
  1583. /* now look at the TCP checksum error bit */
  1584. if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
  1585. /* let the stack verify checksum errors */
  1586. skb_checksum_none_assert(skb);
  1587. adapter->hw_csum_rx_error++;
  1588. } else {
  1589. /* TCP checksum is good */
  1590. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1591. adapter->hw_csum_rx_good++;
  1592. }
  1593. }
/*
 * this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void ixgb_check_copybreak(struct napi_struct *napi,
				 struct ixgb_buffer *buffer_info,
				 u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	/* only packets at or below the copybreak threshold are copied */
	if (length > copybreak)
		return;

	new_skb = napi_alloc_skb(napi, length);
	if (!new_skb)
		return;

	/* copy including the NET_IP_ALIGN pad preceding the data so the
	 * new skb keeps the same IP-header alignment as the original */
	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}
/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 * @work_done: incremented by the number of packets processed
 * @work_to_do: NAPI budget; processing stops once *@work_done reaches it
 *
 * Walks the rx ring from next_to_clean while descriptors have the DD
 * (descriptor done) bit set, unmaps each buffer, drops multi-buffer and
 * errored frames, and passes good frames to the stack.  Consumed
 * buffers are replenished in batches of IXGB_RX_BUFFER_WRITE and once
 * more at the end.
 *
 * Returns true if at least one descriptor was cleaned.
 **/

static bool
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc, *next_rxd;
	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
	u32 length;
	unsigned int i, j;
	int cleaned_count = 0;
	bool cleaned = false;

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = IXGB_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		/* prefetch the buffer_info two entries ahead as well */
		j = i + 1;
		if (j == rx_ring->count)
			j = 0;
		next2_buffer = &rx_ring->buffer_info[j];
		prefetch(next2_buffer);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;

		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 buffer_info->length,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		rx_desc->length = 0;

		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {

			/* All receives must fit into a single buffer */

			pr_debug("Receive packet consumed multiple buffers length<%x>\n",
				 length);

			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		/* drop frames with CRC, symbol, parity, or generic rx errors */
		if (unlikely(rx_desc->errors &
			     (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
			      IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		/* small packets may be copied into a fresh skb so the
		 * original buffer can be recycled */
		ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);

		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		if (status & IXGB_RX_DESC_STATUS_VP)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rx_desc->special));

		netif_receive_skb(skb);

rxdesc_done:
		/* clean up descriptor, might be written over by hw */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
			ixgb_alloc_rx_buffers(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}

	rx_ring->next_to_clean = i;

	/* replenish whatever is still unused before returning */
	cleaned_count = IXGB_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		ixgb_alloc_rx_buffers(adapter, cleaned_count);

	return cleaned;
}
/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: upper bound on the number of buffers to allocate
 *
 * Refills the rx ring from next_to_use, recycling an skb left in
 * buffer_info when possible and allocating a new one otherwise.  Always
 * leaves a few descriptors unused, and finishes by advancing the RDT
 * register so the hardware sees the new buffers.
 **/

static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct ixgb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	long cleancount;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	cleancount = IXGB_DESC_UNUSED(rx_ring);

	/* leave three descriptors unused */
	while (--cleancount > 2 && cleaned_count--) {
		/* recycle! its good for you */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		rx_desc = IXGB_RX_DESC(*rx_ring, i);
		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		/* guarantee DD bit not set now before h/w gets descriptor
		 * this is the rest of the workaround for h/w double
		 * writeback. */
		rx_desc->status = 0;


		if (++i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* RDT must point at the last valid descriptor, i.e. one
		 * before next_to_use */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		IXGB_WRITE_REG(&adapter->hw, RDT, i);
	}
}
  1770. static void
  1771. ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
  1772. {
  1773. u32 ctrl;
  1774. /* enable VLAN tag insert/strip */
  1775. ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
  1776. ctrl |= IXGB_CTRL0_VME;
  1777. IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
  1778. }
  1779. static void
  1780. ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
  1781. {
  1782. u32 ctrl;
  1783. /* disable VLAN tag insert/strip */
  1784. ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
  1785. ctrl &= ~IXGB_CTRL0_VME;
  1786. IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
  1787. }
  1788. static int
  1789. ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
  1790. {
  1791. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1792. u32 vfta, index;
  1793. /* add VID to filter table */
  1794. index = (vid >> 5) & 0x7F;
  1795. vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
  1796. vfta |= (1 << (vid & 0x1F));
  1797. ixgb_write_vfta(&adapter->hw, index, vfta);
  1798. set_bit(vid, adapter->active_vlans);
  1799. return 0;
  1800. }
  1801. static int
  1802. ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
  1803. {
  1804. struct ixgb_adapter *adapter = netdev_priv(netdev);
  1805. u32 vfta, index;
  1806. /* remove VID from filter table */
  1807. index = (vid >> 5) & 0x7F;
  1808. vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
  1809. vfta &= ~(1 << (vid & 0x1F));
  1810. ixgb_write_vfta(&adapter->hw, index, vfta);
  1811. clear_bit(vid, adapter->active_vlans);
  1812. return 0;
  1813. }
/* Re-program the hardware VLAN filter table from the adapter's
 * active-VLAN bitmap; used after a reset wipes the VFTA registers. */
static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void ixgb_netpoll(struct net_device *dev)
{
	struct ixgb_adapter *adapter = netdev_priv(dev);

	/* keep the real IRQ handler from racing with this invocation */
	disable_irq(adapter->pdev->irq);
	ixgb_intr(adapter->pdev->irq, dev);
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * ixgb_io_error_detected - called when PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* permanent failure: no point requesting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
 *
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the ixgb_probe() routine.
 */
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform card reset only on one instance of the card */
	if (0 != PCI_FUNC (pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;

	pci_set_master(pdev);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	ixgb_reset(adapter);

	/* Make sure the EEPROM is good */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "After reset, the EEPROM checksum is not valid\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* restore the MAC address from the EEPROM and validate it */
	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		netif_err(adapter, probe, adapter->netdev,
			  "After reset, invalid MAC address\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgb_io_resume - called when its OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that its OK to resume
 * normal operation. Implementation resembles the second-half
 * of the ixgb_probe() routine.
 */
static void ixgb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		if (ixgb_up(adapter)) {
			pr_err("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
	/* kick the watchdog so link state is re-evaluated promptly */
	mod_timer(&adapter->watchdog_timer, jiffies);
}
  1919. /* ixgb_main.c */