enc28j60.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Microchip ENC28J60 ethernet driver (MAC + PHY)
 *
 * Copyright (C) 2007 Eurek srl
 * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
 * based on enc28j60.c written by David Anders for 2.4 kernel version
 *
 * $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/property.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>

#include "enc28j60_hw.h"

#define DRV_NAME "enc28j60"
#define DRV_VERSION "1.02"

#define SPI_OPLEN 1

#define ENC28J60_MSG_DEFAULT \
	(NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK)

/* Buffer size required for the largest SPI transfer (i.e., reading a
 * frame).
 */
#define SPI_TRANSFER_BUF_LEN (4 + MAX_FRAMELEN)

#define TX_TIMEOUT (4 * HZ)

/* Max TX retries in case of collision as suggested by errata datasheet */
#define MAX_TX_RETRYCOUNT 16

enum {
	RXFILTER_NORMAL,
	RXFILTER_MULTI,
	RXFILTER_PROMISC
};

/* Driver local data */
struct enc28j60_net {
	struct net_device *netdev;
	struct spi_device *spi;
	struct mutex lock;
	struct sk_buff *tx_skb;
	struct work_struct tx_work;
	struct work_struct irq_work;
	struct work_struct setrx_work;
	struct work_struct restart_work;
	u8 bank;		/* current register bank selected */
	u16 next_pk_ptr;	/* next packet pointer within FIFO */
	u16 max_pk_counter;	/* statistics: max packet counter */
	u16 tx_retry_count;
	bool hw_enable;
	bool full_duplex;
	int rxfilter;
	u32 msg_enable;
	u8 spi_transfer_buf[SPI_TRANSFER_BUF_LEN];
};

/* use ethtool to change the level for any given device */
static struct {
	u32 msg_enable;
} debug = { -1 };

/*
 * SPI read buffer
 * Wait for the SPI transfer and copy received data to destination.
 */
static int
spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
{
	struct device *dev = &priv->spi->dev;
	u8 *rx_buf = priv->spi_transfer_buf + 4;
	u8 *tx_buf = priv->spi_transfer_buf;
	struct spi_transfer tx = {
		.tx_buf = tx_buf,
		.len = SPI_OPLEN,
	};
	struct spi_transfer rx = {
		.rx_buf = rx_buf,
		.len = len,
	};
	struct spi_message msg;
	int ret;

	tx_buf[0] = ENC28J60_READ_BUF_MEM;

	spi_message_init(&msg);
	spi_message_add_tail(&tx, &msg);
	spi_message_add_tail(&rx, &msg);

	ret = spi_sync(priv->spi, &msg);
	if (ret == 0) {
		memcpy(data, rx_buf, len);
		ret = msg.status;
	}
	if (ret && netif_msg_drv(priv))
		dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
			   __func__, ret);

	return ret;
}
/*
 * SPI write buffer
 */
static int spi_write_buf(struct enc28j60_net *priv, int len, const u8 *data)
{
	struct device *dev = &priv->spi->dev;
	int ret;

	if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0)
		ret = -EINVAL;
	else {
		priv->spi_transfer_buf[0] = ENC28J60_WRITE_BUF_MEM;
		memcpy(&priv->spi_transfer_buf[1], data, len);
		ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
		if (ret && netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
				   __func__, ret);
	}
	return ret;
}

/*
 * basic SPI read operation
 */
static u8 spi_read_op(struct enc28j60_net *priv, u8 op, u8 addr)
{
	struct device *dev = &priv->spi->dev;
	u8 tx_buf[2];
	u8 rx_buf[4];
	u8 val = 0;
	int ret;
	int slen = SPI_OPLEN;

	/* do dummy read if needed */
	if (addr & SPRD_MASK)
		slen++;

	tx_buf[0] = op | (addr & ADDR_MASK);
	ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
	if (ret)
		dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
			   __func__, ret);
	else
		val = rx_buf[slen - 1];

	return val;
}

/*
 * basic SPI write operation
 */
static int spi_write_op(struct enc28j60_net *priv, u8 op, u8 addr, u8 val)
{
	struct device *dev = &priv->spi->dev;
	int ret;

	priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK);
	priv->spi_transfer_buf[1] = val;
	ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
	if (ret && netif_msg_drv(priv))
		dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
			   __func__, ret);
	return ret;
}

static void enc28j60_soft_reset(struct enc28j60_net *priv)
{
	spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
	/* Errata workaround #1, CLKRDY check is unreliable,
	 * delay at least 1 ms instead */
	udelay(2000);
}
/*
 * select the current register bank if necessary
 */
static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
{
	u8 b = (addr & BANK_MASK) >> 5;

	/* These registers (EIE, EIR, ESTAT, ECON2, ECON1)
	 * are present in all banks, no need to switch bank.
	 */
	if (addr >= EIE && addr <= ECON1)
		return;

	/* Clear or set each bank selection bit as needed */
	if ((b & ECON1_BSEL0) != (priv->bank & ECON1_BSEL0)) {
		if (b & ECON1_BSEL0)
			spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1,
				     ECON1_BSEL0);
		else
			spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1,
				     ECON1_BSEL0);
	}
	if ((b & ECON1_BSEL1) != (priv->bank & ECON1_BSEL1)) {
		if (b & ECON1_BSEL1)
			spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1,
				     ECON1_BSEL1);
		else
			spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1,
				     ECON1_BSEL1);
	}
	priv->bank = b;
}

/*
 * Register access routines through the SPI bus.
 * Every register access comes in two flavours:
 * - nolock_xxx: caller needs to invoke mutex_lock, usually to access
 *   atomically more than one register
 * - locked_xxx: caller doesn't need to invoke mutex_lock, single access
 *
 * Some registers can be accessed through the bit field clear and
 * bit field set to avoid a read modify write cycle.
 */

/*
 * Register bit field Set
 */
static void nolock_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
{
	enc28j60_set_bank(priv, addr);
	spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask);
}

static void locked_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
{
	mutex_lock(&priv->lock);
	nolock_reg_bfset(priv, addr, mask);
	mutex_unlock(&priv->lock);
}

/*
 * Register bit field Clear
 */
static void nolock_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
{
	enc28j60_set_bank(priv, addr);
	spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask);
}

static void locked_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
{
	mutex_lock(&priv->lock);
	nolock_reg_bfclr(priv, addr, mask);
	mutex_unlock(&priv->lock);
}

/*
 * Register byte read
 */
static int nolock_regb_read(struct enc28j60_net *priv, u8 address)
{
	enc28j60_set_bank(priv, address);
	return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
}

static int locked_regb_read(struct enc28j60_net *priv, u8 address)
{
	int ret;

	mutex_lock(&priv->lock);
	ret = nolock_regb_read(priv, address);
	mutex_unlock(&priv->lock);

	return ret;
}

/*
 * Register word read
 */
static int nolock_regw_read(struct enc28j60_net *priv, u8 address)
{
	int rl, rh;

	enc28j60_set_bank(priv, address);
	rl = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
	rh = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address + 1);

	return (rh << 8) | rl;
}

static int locked_regw_read(struct enc28j60_net *priv, u8 address)
{
	int ret;

	mutex_lock(&priv->lock);
	ret = nolock_regw_read(priv, address);
	mutex_unlock(&priv->lock);

	return ret;
}

/*
 * Register byte write
 */
static void nolock_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
{
	enc28j60_set_bank(priv, address);
	spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data);
}

static void locked_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
{
	mutex_lock(&priv->lock);
	nolock_regb_write(priv, address, data);
	mutex_unlock(&priv->lock);
}

/*
 * Register word write
 */
static void nolock_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
{
	enc28j60_set_bank(priv, address);
	spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data);
	spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address + 1,
		     (u8) (data >> 8));
}

static void locked_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
{
	mutex_lock(&priv->lock);
	nolock_regw_write(priv, address, data);
	mutex_unlock(&priv->lock);
}
/*
 * Buffer memory read
 * Select the starting address and execute a SPI buffer read.
 */
static void enc28j60_mem_read(struct enc28j60_net *priv, u16 addr, int len,
			      u8 *data)
{
	mutex_lock(&priv->lock);
	nolock_regw_write(priv, ERDPTL, addr);
#ifdef CONFIG_ENC28J60_WRITEVERIFY
	if (netif_msg_drv(priv)) {
		struct device *dev = &priv->spi->dev;
		u16 reg;

		reg = nolock_regw_read(priv, ERDPTL);
		if (reg != addr)
			dev_printk(KERN_DEBUG, dev,
				   "%s() error writing ERDPT (0x%04x - 0x%04x)\n",
				   __func__, reg, addr);
	}
#endif
	spi_read_buf(priv, len, data);
	mutex_unlock(&priv->lock);
}

/*
 * Write packet to enc28j60 TX buffer memory
 */
static void
enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
{
	struct device *dev = &priv->spi->dev;

	mutex_lock(&priv->lock);
	/* Set the write pointer to start of transmit buffer area */
	nolock_regw_write(priv, EWRPTL, TXSTART_INIT);
#ifdef CONFIG_ENC28J60_WRITEVERIFY
	if (netif_msg_drv(priv)) {
		u16 reg;

		reg = nolock_regw_read(priv, EWRPTL);
		if (reg != TXSTART_INIT)
			dev_printk(KERN_DEBUG, dev,
				   "%s() ERWPT:0x%04x != 0x%04x\n",
				   __func__, reg, TXSTART_INIT);
	}
#endif
	/* Set the TXND pointer to correspond to the packet size given */
	nolock_regw_write(priv, ETXNDL, TXSTART_INIT + len);
	/* write per-packet control byte */
	spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00);
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev,
			   "%s() after control byte ERWPT:0x%04x\n",
			   __func__, nolock_regw_read(priv, EWRPTL));
	/* copy the packet into the transmit buffer */
	spi_write_buf(priv, len, data);
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev,
			   "%s() after write packet ERWPT:0x%04x, len=%d\n",
			   __func__, nolock_regw_read(priv, EWRPTL), len);
	mutex_unlock(&priv->lock);
}

static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val)
{
	struct device *dev = &priv->spi->dev;
	unsigned long timeout = jiffies + msecs_to_jiffies(20);

	/* 20 msec timeout read */
	while ((nolock_regb_read(priv, reg) & mask) != val) {
		if (time_after(jiffies, timeout)) {
			if (netif_msg_drv(priv))
				dev_dbg(dev, "reg %02x ready timeout!\n", reg);
			return -ETIMEDOUT;
		}
		cpu_relax();
	}
	return 0;
}

/*
 * Wait until the PHY operation is complete.
 */
static int wait_phy_ready(struct enc28j60_net *priv)
{
	return poll_ready(priv, MISTAT, MISTAT_BUSY, 0) ? 0 : 1;
}
/*
 * PHY register read
 * PHY registers are not accessed directly, but through the MII.
 */
static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address)
{
	u16 ret;

	mutex_lock(&priv->lock);
	/* set the PHY register address */
	nolock_regb_write(priv, MIREGADR, address);
	/* start the register read operation */
	nolock_regb_write(priv, MICMD, MICMD_MIIRD);
	/* wait until the PHY read completes */
	wait_phy_ready(priv);
	/* quit reading */
	nolock_regb_write(priv, MICMD, 0x00);
	/* return the data */
	ret = nolock_regw_read(priv, MIRDL);
	mutex_unlock(&priv->lock);

	return ret;
}

static int enc28j60_phy_write(struct enc28j60_net *priv, u8 address, u16 data)
{
	int ret;

	mutex_lock(&priv->lock);
	/* set the PHY register address */
	nolock_regb_write(priv, MIREGADR, address);
	/* write the PHY data */
	nolock_regw_write(priv, MIWRL, data);
	/* wait until the PHY write completes and return */
	ret = wait_phy_ready(priv);
	mutex_unlock(&priv->lock);

	return ret;
}

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static int enc28j60_set_hw_macaddr(struct net_device *ndev)
{
	int ret;
	struct enc28j60_net *priv = netdev_priv(ndev);
	struct device *dev = &priv->spi->dev;

	mutex_lock(&priv->lock);
	if (!priv->hw_enable) {
		if (netif_msg_drv(priv))
			dev_info(dev, "%s: Setting MAC address to %pM\n",
				 ndev->name, ndev->dev_addr);
		/* NOTE: MAC address in ENC28J60 is byte-backward */
		nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]);
		nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]);
		nolock_regb_write(priv, MAADR3, ndev->dev_addr[2]);
		nolock_regb_write(priv, MAADR2, ndev->dev_addr[3]);
		nolock_regb_write(priv, MAADR1, ndev->dev_addr[4]);
		nolock_regb_write(priv, MAADR0, ndev->dev_addr[5]);
		ret = 0;
	} else {
		if (netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev,
				   "%s() Hardware must be disabled to set Mac address\n",
				   __func__);
		ret = -EBUSY;
	}
	mutex_unlock(&priv->lock);
	return ret;
}

/*
 * Store the new hardware address in dev->dev_addr, and update the MAC.
 */
static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *address = addr;

	if (netif_running(dev))
		return -EBUSY;
	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(dev->dev_addr, address->sa_data);
	return enc28j60_set_hw_macaddr(dev);
}

/*
 * Debug routine to dump useful register contents
 */
static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg)
{
	struct device *dev = &priv->spi->dev;

	mutex_lock(&priv->lock);
	dev_printk(KERN_DEBUG, dev,
		   " %s\n"
		   "HwRevID: 0x%02x\n"
		   "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n"
		   " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n"
		   "MAC : MACON1 MACON3 MACON4\n"
		   " 0x%02x 0x%02x 0x%02x\n"
		   "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n"
		   " 0x%04x 0x%04x 0x%04x 0x%04x "
		   "0x%02x 0x%02x 0x%04x\n"
		   "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n"
		   " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n",
		   msg, nolock_regb_read(priv, EREVID),
		   nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2),
		   nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR),
		   nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1),
		   nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4),
		   nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL),
		   nolock_regw_read(priv, ERXWRPTL),
		   nolock_regw_read(priv, ERXRDPTL),
		   nolock_regb_read(priv, ERXFCON),
		   nolock_regb_read(priv, EPKTCNT),
		   nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL),
		   nolock_regw_read(priv, ETXNDL),
		   nolock_regb_read(priv, MACLCON1),
		   nolock_regb_read(priv, MACLCON2),
		   nolock_regb_read(priv, MAPHSUP));
	mutex_unlock(&priv->lock);
}
/*
 * ERXRDPT needs to always be set at odd addresses; refer to the errata datasheet
 */
static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
{
	u16 erxrdpt;

	if ((next_packet_ptr - 1 < start) || (next_packet_ptr - 1 > end))
		erxrdpt = end;
	else
		erxrdpt = next_packet_ptr - 1;

	return erxrdpt;
}

/*
 * Calculate wrap around when reading beyond the end of the RX buffer
 */
static u16 rx_packet_start(u16 ptr)
{
	if (ptr + RSV_SIZE > RXEND_INIT)
		return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
	else
		return ptr + RSV_SIZE;
}

static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
	struct device *dev = &priv->spi->dev;
	u16 erxrdpt;

	if (start > 0x1FFF || end > 0x1FFF || start > end) {
		if (netif_msg_drv(priv))
			dev_err(dev, "%s(%d, %d) RXFIFO bad parameters!\n",
				__func__, start, end);
		return;
	}
	/* set receive buffer start + end */
	priv->next_pk_ptr = start;
	nolock_regw_write(priv, ERXSTL, start);
	erxrdpt = erxrdpt_workaround(priv->next_pk_ptr, start, end);
	nolock_regw_write(priv, ERXRDPTL, erxrdpt);
	nolock_regw_write(priv, ERXNDL, end);
}

static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
	struct device *dev = &priv->spi->dev;

	if (start > 0x1FFF || end > 0x1FFF || start > end) {
		if (netif_msg_drv(priv))
			dev_err(dev, "%s(%d, %d) TXFIFO bad parameters!\n",
				__func__, start, end);
		return;
	}
	/* set transmit buffer start + end */
	nolock_regw_write(priv, ETXSTL, start);
	nolock_regw_write(priv, ETXNDL, end);
}

/*
 * Low power mode shrinks power consumption about 100x, so we'd like
 * the chip to be in that mode whenever it's inactive. (However, we
 * can't stay in low power mode during suspend with WOL active.)
 */
static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low)
{
	struct device *dev = &priv->spi->dev;

	if (netif_msg_drv(priv))
		dev_dbg(dev, "%s power...\n", is_low ? "low" : "high");

	mutex_lock(&priv->lock);
	if (is_low) {
		nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
		poll_ready(priv, ESTAT, ESTAT_RXBUSY, 0);
		poll_ready(priv, ECON1, ECON1_TXRTS, 0);
		/* ECON2_VRPS was set during initialization */
		nolock_reg_bfset(priv, ECON2, ECON2_PWRSV);
	} else {
		nolock_reg_bfclr(priv, ECON2, ECON2_PWRSV);
		poll_ready(priv, ESTAT, ESTAT_CLKRDY, ESTAT_CLKRDY);
		/* caller sets ECON1_RXEN */
	}
	mutex_unlock(&priv->lock);
}
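
/*
 * Reset and (re)configure the controller: soft reset, FIFO layout,
 * receive filters, MAC and duplex setup, PHY LED configuration.
 * Returns 1 on success, 0 if the chip does not respond (bad RevID)
 * or a PHY write fails.
 */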
static int enc28j60_hw_init(struct enc28j60_net *priv)
{
	struct device *dev = &priv->spi->dev;
	u8 reg;

	if (netif_msg_drv(priv))
		dev_printk(KERN_DEBUG, dev, "%s() - %s\n", __func__,
			   priv->full_duplex ? "FullDuplex" : "HalfDuplex");

	mutex_lock(&priv->lock);
	/* first reset the chip */
	enc28j60_soft_reset(priv);
	/* Clear ECON1 */
	spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, ECON1, 0x00);
	priv->bank = 0;
	priv->hw_enable = false;
	priv->tx_retry_count = 0;
	priv->max_pk_counter = 0;
	priv->rxfilter = RXFILTER_NORMAL;
	/* enable address auto increment and voltage regulator powersave */
	nolock_regb_write(priv, ECON2, ECON2_AUTOINC | ECON2_VRPS);

	nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT);
	nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT);
	mutex_unlock(&priv->lock);

	/*
	 * Check the RevID.
	 * If it's 0x00 or 0xFF probably the enc28j60 is not mounted or
	 * damaged.
	 */
	reg = locked_regb_read(priv, EREVID);
	if (netif_msg_drv(priv))
		dev_info(dev, "chip RevID: 0x%02x\n", reg);
	if (reg == 0x00 || reg == 0xff) {
		if (netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev, "%s() Invalid RevId %d\n",
				   __func__, reg);
		return 0;
	}

	/* default filter mode: (unicast OR broadcast) AND crc valid */
	locked_regb_write(priv, ERXFCON,
			  ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN);

	/* enable MAC receive */
	locked_regb_write(priv, MACON1,
			  MACON1_MARXEN | MACON1_TXPAUS | MACON1_RXPAUS);
	/* enable automatic padding and CRC operations */
	if (priv->full_duplex) {
		locked_regb_write(priv, MACON3,
				  MACON3_PADCFG0 | MACON3_TXCRCEN |
				  MACON3_FRMLNEN | MACON3_FULDPX);
		/* set inter-frame gap (non-back-to-back) */
		locked_regb_write(priv, MAIPGL, 0x12);
		/* set inter-frame gap (back-to-back) */
		locked_regb_write(priv, MABBIPG, 0x15);
	} else {
		locked_regb_write(priv, MACON3,
				  MACON3_PADCFG0 | MACON3_TXCRCEN |
				  MACON3_FRMLNEN);
		locked_regb_write(priv, MACON4, 1 << 6); /* DEFER bit */
		/* set inter-frame gap (non-back-to-back) */
		locked_regw_write(priv, MAIPGL, 0x0C12);
		/* set inter-frame gap (back-to-back) */
		locked_regb_write(priv, MABBIPG, 0x12);
	}
	/*
	 * MACLCON1 (default)
	 * MACLCON2 (default)
	 * Set the maximum packet size which the controller will accept.
	 */
	locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN);

	/* Configure LEDs */
	if (!enc28j60_phy_write(priv, PHLCON, ENC28J60_LAMPS_MODE))
		return 0;

	if (priv->full_duplex) {
		if (!enc28j60_phy_write(priv, PHCON1, PHCON1_PDPXMD))
			return 0;
		if (!enc28j60_phy_write(priv, PHCON2, 0x00))
			return 0;
	} else {
		if (!enc28j60_phy_write(priv, PHCON1, 0x00))
			return 0;
		if (!enc28j60_phy_write(priv, PHCON2, PHCON2_HDLDIS))
			return 0;
	}
	if (netif_msg_hw(priv))
		enc28j60_dump_regs(priv, "Hw initialized.");

	return 1;
}

static void enc28j60_hw_enable(struct enc28j60_net *priv)
{
	struct device *dev = &priv->spi->dev;

	/* enable interrupts */
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev, "%s() enabling interrupts.\n",
			   __func__);

	enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);

	mutex_lock(&priv->lock);
	nolock_reg_bfclr(priv, EIR, EIR_DMAIF | EIR_LINKIF |
			 EIR_TXIF | EIR_TXERIF | EIR_RXERIF | EIR_PKTIF);
	nolock_regb_write(priv, EIE, EIE_INTIE | EIE_PKTIE | EIE_LINKIE |
			  EIE_TXIE | EIE_TXERIE | EIE_RXERIE);

	/* enable receive logic */
	nolock_reg_bfset(priv, ECON1, ECON1_RXEN);
	priv->hw_enable = true;
	mutex_unlock(&priv->lock);
}

static void enc28j60_hw_disable(struct enc28j60_net *priv)
{
	mutex_lock(&priv->lock);
	/* disable interrupts and packet reception */
	nolock_regb_write(priv, EIE, 0x00);
	nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
	priv->hw_enable = false;
	mutex_unlock(&priv->lock);
}
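
/*
 * Apply a forced 10 Mbps link setting from ethtool; only the duplex
 * mode is configurable, and only while the hardware is disabled.
 */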
static int
enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex)
{
	struct enc28j60_net *priv = netdev_priv(ndev);
	int ret = 0;

	if (!priv->hw_enable) {
		/* link is in low power mode now; duplex setting
		 * will take effect on next enc28j60_hw_init().
		 */
		if (autoneg == AUTONEG_DISABLE && speed == SPEED_10)
			priv->full_duplex = (duplex == DUPLEX_FULL);
		else {
			if (netif_msg_link(priv))
				netdev_warn(ndev, "unsupported link setting\n");
			ret = -EOPNOTSUPP;
		}
	} else {
		if (netif_msg_link(priv))
			netdev_warn(ndev, "Warning: hw must be disabled to set link mode\n");
		ret = -EBUSY;
	}
	return ret;
}

/*
 * Read the Transmit Status Vector
 */
static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
{
	struct device *dev = &priv->spi->dev;
	int endptr;

	endptr = locked_regw_read(priv, ETXNDL);
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev, "reading TSV at addr:0x%04x\n",
			   endptr + 1);
	enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
}

static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
			      u8 tsv[TSV_SIZE])
{
	struct device *dev = &priv->spi->dev;
	u16 tmp1, tmp2;

	dev_printk(KERN_DEBUG, dev, "%s - TSV:\n", msg);
	tmp1 = tsv[1];
	tmp1 <<= 8;
	tmp1 |= tsv[0];

	tmp2 = tsv[5];
	tmp2 <<= 8;
	tmp2 |= tsv[4];

	dev_printk(KERN_DEBUG, dev,
		   "ByteCount: %d, CollisionCount: %d, TotByteOnWire: %d\n",
		   tmp1, tsv[2] & 0x0f, tmp2);
	dev_printk(KERN_DEBUG, dev,
		   "TxDone: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
		   TSV_GETBIT(tsv, TSV_TXDONE),
		   TSV_GETBIT(tsv, TSV_TXCRCERROR),
		   TSV_GETBIT(tsv, TSV_TXLENCHKERROR),
		   TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE));
	dev_printk(KERN_DEBUG, dev,
		   "Multicast: %d, Broadcast: %d, PacketDefer: %d, ExDefer: %d\n",
		   TSV_GETBIT(tsv, TSV_TXMULTICAST),
		   TSV_GETBIT(tsv, TSV_TXBROADCAST),
		   TSV_GETBIT(tsv, TSV_TXPACKETDEFER),
		   TSV_GETBIT(tsv, TSV_TXEXDEFER));
	dev_printk(KERN_DEBUG, dev,
		   "ExCollision: %d, LateCollision: %d, Giant: %d, Underrun: %d\n",
		   TSV_GETBIT(tsv, TSV_TXEXCOLLISION),
		   TSV_GETBIT(tsv, TSV_TXLATECOLLISION),
		   TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN));
	dev_printk(KERN_DEBUG, dev,
		   "ControlFrame: %d, PauseFrame: %d, BackPressApp: %d, VLanTagFrame: %d\n",
		   TSV_GETBIT(tsv, TSV_TXCONTROLFRAME),
		   TSV_GETBIT(tsv, TSV_TXPAUSEFRAME),
		   TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP),
		   TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME));
}

/*
 * Receive Status vector
 */
static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg,
			      u16 pk_ptr, int len, u16 sts)
{
	struct device *dev = &priv->spi->dev;

	dev_printk(KERN_DEBUG, dev, "%s - NextPk: 0x%04x - RSV:\n", msg, pk_ptr);
	dev_printk(KERN_DEBUG, dev, "ByteCount: %d, DribbleNibble: %d\n",
		   len, RSV_GETBIT(sts, RSV_DRIBBLENIBBLE));
	dev_printk(KERN_DEBUG, dev,
		   "RxOK: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
		   RSV_GETBIT(sts, RSV_RXOK),
		   RSV_GETBIT(sts, RSV_CRCERROR),
		   RSV_GETBIT(sts, RSV_LENCHECKERR),
		   RSV_GETBIT(sts, RSV_LENOUTOFRANGE));
	dev_printk(KERN_DEBUG, dev,
		   "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
		   RSV_GETBIT(sts, RSV_RXMULTICAST),
		   RSV_GETBIT(sts, RSV_RXBROADCAST),
		   RSV_GETBIT(sts, RSV_RXLONGEVDROPEV),
		   RSV_GETBIT(sts, RSV_CARRIEREV));
	dev_printk(KERN_DEBUG, dev,
		   "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
		   RSV_GETBIT(sts, RSV_RXCONTROLFRAME),
		   RSV_GETBIT(sts, RSV_RXPAUSEFRAME),
		   RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE),
		   RSV_GETBIT(sts, RSV_RXTYPEVLAN));
}

static void dump_packet(const char *msg, int len, const char *data)
{
	printk(KERN_DEBUG DRV_NAME ": %s - packet len:%d\n", msg, len);
	print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       data, len, true);
}
/*
 * Hardware receive function.
 * Read the buffer memory, update the FIFO pointer to free the buffer,
 * check the status vector and decrement the packet counter.
 */
static void enc28j60_hw_rx(struct net_device *ndev)
{
	struct enc28j60_net *priv = netdev_priv(ndev);
	struct device *dev = &priv->spi->dev;
	struct sk_buff *skb = NULL;
	u16 erxrdpt, next_packet, rxstat;
	u8 rsv[RSV_SIZE];
	int len;

	if (netif_msg_rx_status(priv))
		netdev_printk(KERN_DEBUG, ndev, "RX pk_addr:0x%04x\n",
			      priv->next_pk_ptr);

	if (unlikely(priv->next_pk_ptr > RXEND_INIT)) {
		if (netif_msg_rx_err(priv))
			netdev_err(ndev, "%s() Invalid packet address!! 0x%04x\n",
				   __func__, priv->next_pk_ptr);
		/* packet address corrupted: reset RX logic */
		mutex_lock(&priv->lock);
		nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
		nolock_reg_bfset(priv, ECON1, ECON1_RXRST);
		nolock_reg_bfclr(priv, ECON1, ECON1_RXRST);
		nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT);
		nolock_reg_bfclr(priv, EIR, EIR_RXERIF);
		nolock_reg_bfset(priv, ECON1, ECON1_RXEN);
		mutex_unlock(&priv->lock);
		ndev->stats.rx_errors++;
		return;
	}
	/* Read next packet pointer and rx status vector */
	enc28j60_mem_read(priv, priv->next_pk_ptr, sizeof(rsv), rsv);

	next_packet = rsv[1];
	next_packet <<= 8;
	next_packet |= rsv[0];

	len = rsv[3];
	len <<= 8;
	len |= rsv[2];

	rxstat = rsv[5];
	rxstat <<= 8;
	rxstat |= rsv[4];

	if (netif_msg_rx_status(priv))
		enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat);

	if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) {
		if (netif_msg_rx_err(priv))
			netdev_err(ndev, "Rx Error (%04x)\n", rxstat);
		ndev->stats.rx_errors++;
		if (RSV_GETBIT(rxstat, RSV_CRCERROR))
			ndev->stats.rx_crc_errors++;
		if (RSV_GETBIT(rxstat, RSV_LENCHECKERR))
			ndev->stats.rx_frame_errors++;
		if (len > MAX_FRAMELEN)
			ndev->stats.rx_over_errors++;
	} else {
		skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
		if (!skb) {
			if (netif_msg_rx_err(priv))
				netdev_err(ndev, "out of memory for Rx'd frame\n");
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			/* copy the packet from the receive buffer */
			enc28j60_mem_read(priv,
					  rx_packet_start(priv->next_pk_ptr),
					  len, skb_put(skb, len));
			if (netif_msg_pktdata(priv))
				dump_packet(__func__, skb->len, skb->data);
			skb->protocol = eth_type_trans(skb, ndev);
			/* update statistics */
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += len;
			netif_rx_ni(skb);
		}
	}
	/*
	 * Move the RX read pointer to the start of the next
	 * received packet.
	 * This frees the memory we just read out.
	 */
	erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev, "%s() ERXRDPT:0x%04x\n",
			   __func__, erxrdpt);

	mutex_lock(&priv->lock);
	nolock_regw_write(priv, ERXRDPTL, erxrdpt);
#ifdef CONFIG_ENC28J60_WRITEVERIFY
	if (netif_msg_drv(priv)) {
		u16 reg;

		reg = nolock_regw_read(priv, ERXRDPTL);
		if (reg != erxrdpt)
			dev_printk(KERN_DEBUG, dev,
				   "%s() ERXRDPT verify error (0x%04x - 0x%04x)\n",
				   __func__, reg, erxrdpt);
	}
#endif
	priv->next_pk_ptr = next_packet;
	/* we are done with this packet, decrement the packet counter */
	nolock_reg_bfset(priv, ECON2, ECON2_PKTDEC);
	mutex_unlock(&priv->lock);
}
/*
 * Calculate free space in RxFIFO
 */
static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
{
	struct net_device *ndev = priv->netdev;
	int epkcnt, erxst, erxnd, erxwr, erxrd;
	int free_space;

	mutex_lock(&priv->lock);
	epkcnt = nolock_regb_read(priv, EPKTCNT);
	if (epkcnt >= 255)
		free_space = -1;
	else {
		erxst = nolock_regw_read(priv, ERXSTL);
		erxnd = nolock_regw_read(priv, ERXNDL);
		erxwr = nolock_regw_read(priv, ERXWRPTL);
		erxrd = nolock_regw_read(priv, ERXRDPTL);

		if (erxwr > erxrd)
			free_space = (erxnd - erxst) - (erxwr - erxrd);
		else if (erxwr == erxrd)
			free_space = (erxnd - erxst);
		else
			free_space = erxrd - erxwr - 1;
	}
	mutex_unlock(&priv->lock);
	if (netif_msg_rx_status(priv))
		netdev_printk(KERN_DEBUG, ndev, "%s() free_space = %d\n",
			      __func__, free_space);
	return free_space;
}

/*
 * Access the PHY to determine link status
 */
static void enc28j60_check_link_status(struct net_device *ndev)
{
	struct enc28j60_net *priv = netdev_priv(ndev);
	struct device *dev = &priv->spi->dev;
	u16 reg;
	int duplex;

	reg = enc28j60_phy_read(priv, PHSTAT2);
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev,
			   "%s() PHSTAT1: %04x, PHSTAT2: %04x\n", __func__,
			   enc28j60_phy_read(priv, PHSTAT1), reg);
	duplex = reg & PHSTAT2_DPXSTAT;

	if (reg & PHSTAT2_LSTAT) {
		netif_carrier_on(ndev);
		if (netif_msg_ifup(priv))
			netdev_info(ndev, "link up - %s\n",
				    duplex ? "Full duplex" : "Half duplex");
	} else {
		if (netif_msg_ifdown(priv))
			netdev_info(ndev, "link down\n");
		netif_carrier_off(ndev);
	}
}

static void enc28j60_tx_clear(struct net_device *ndev, bool err)
{
	struct enc28j60_net *priv = netdev_priv(ndev);

	if (err)
		ndev->stats.tx_errors++;
	else
		ndev->stats.tx_packets++;

	if (priv->tx_skb) {
		if (!err)
			ndev->stats.tx_bytes += priv->tx_skb->len;
		dev_kfree_skb(priv->tx_skb);
		priv->tx_skb = NULL;
	}
	locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
	netif_wake_queue(ndev);
}

/*
 * RX handler
 * Ignore PKTIF because it is unreliable! (Look at the errata datasheet)
 * Checking EPKTCNT is the suggested workaround.
 * We don't need to clear the interrupt flag; it is cleared automatically
 * when enc28j60_hw_rx() decrements the packet counter.
 * Returns how many packets were processed.
 */
static int enc28j60_rx_interrupt(struct net_device *ndev)
{
	struct enc28j60_net *priv = netdev_priv(ndev);
	int pk_counter, ret;

	pk_counter = locked_regb_read(priv, EPKTCNT);
	if (pk_counter && netif_msg_intr(priv))
		netdev_printk(KERN_DEBUG, ndev, "intRX, pk_cnt: %d\n",
			      pk_counter);
	if (pk_counter > priv->max_pk_counter) {
		/* update statistics */
		priv->max_pk_counter = pk_counter;
		if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1)
			netdev_printk(KERN_DEBUG, ndev, "RX max_pk_cnt: %d\n",
				      priv->max_pk_counter);
	}
	ret = pk_counter;
	while (pk_counter-- > 0)
		enc28j60_hw_rx(ndev);

	return ret;
}
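
/*
 * Workqueue handler for the chip interrupt: with INTIE masked, loop over
 * the EIR flags (DMA, link, TX done, TX error, RX error) and the
 * EPKTCNT-based RX path until no more events are pending, then
 * re-enable INTIE.
 */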
static void enc28j60_irq_work_handler(struct work_struct *work)
{
	struct enc28j60_net *priv =
		container_of(work, struct enc28j60_net, irq_work);
	struct net_device *ndev = priv->netdev;
	int intflags, loop;

	/* disable further interrupts */
	locked_reg_bfclr(priv, EIE, EIE_INTIE);

	do {
		loop = 0;
		intflags = locked_regb_read(priv, EIR);
		/* DMA interrupt handler (not currently used) */
		if ((intflags & EIR_DMAIF) != 0) {
			loop++;
			if (netif_msg_intr(priv))
				netdev_printk(KERN_DEBUG, ndev, "intDMA(%d)\n",
					      loop);
			locked_reg_bfclr(priv, EIR, EIR_DMAIF);
		}
		/* LINK changed handler */
		if ((intflags & EIR_LINKIF) != 0) {
			loop++;
			if (netif_msg_intr(priv))
				netdev_printk(KERN_DEBUG, ndev, "intLINK(%d)\n",
					      loop);
			enc28j60_check_link_status(ndev);
			/* read PHIR to clear the flag */
			enc28j60_phy_read(priv, PHIR);
		}
		/* TX complete handler */
		if (((intflags & EIR_TXIF) != 0) &&
		    ((intflags & EIR_TXERIF) == 0)) {
			bool err = false;

			loop++;
			if (netif_msg_intr(priv))
				netdev_printk(KERN_DEBUG, ndev, "intTX(%d)\n",
					      loop);
			priv->tx_retry_count = 0;
			if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) {
				if (netif_msg_tx_err(priv))
					netdev_err(ndev, "Tx Error (aborted)\n");
				err = true;
			}
			if (netif_msg_tx_done(priv)) {
				u8 tsv[TSV_SIZE];

				enc28j60_read_tsv(priv, tsv);
				enc28j60_dump_tsv(priv, "Tx Done", tsv);
			}
			enc28j60_tx_clear(ndev, err);
			locked_reg_bfclr(priv, EIR, EIR_TXIF);
		}
		/* TX Error handler */
		if ((intflags & EIR_TXERIF) != 0) {
			u8 tsv[TSV_SIZE];

			loop++;
			if (netif_msg_intr(priv))
				netdev_printk(KERN_DEBUG, ndev, "intTXErr(%d)\n",
					      loop);
			locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
			enc28j60_read_tsv(priv, tsv);
			if (netif_msg_tx_err(priv))
				enc28j60_dump_tsv(priv, "Tx Error", tsv);
			/* Reset TX logic */
			mutex_lock(&priv->lock);
			nolock_reg_bfset(priv, ECON1, ECON1_TXRST);
			nolock_reg_bfclr(priv, ECON1, ECON1_TXRST);
			nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT);
			mutex_unlock(&priv->lock);
			/* Transmit Late collision check for retransmit */
			if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) {
				if (netif_msg_tx_err(priv))
					netdev_printk(KERN_DEBUG, ndev,
						      "LateCollision TXErr (%d)\n",
						      priv->tx_retry_count);
				if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT)
					locked_reg_bfset(priv, ECON1,
							 ECON1_TXRTS);
				else
					enc28j60_tx_clear(ndev, true);
			} else
				enc28j60_tx_clear(ndev, true);
			locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
		}
		/* RX Error handler */
		if ((intflags & EIR_RXERIF) != 0) {
			loop++;
			if (netif_msg_intr(priv))
				netdev_printk(KERN_DEBUG, ndev, "intRXErr(%d)\n",
					      loop);
			/* Check free FIFO space to flag RX overrun */
			if (enc28j60_get_free_rxfifo(priv) <= 0) {
				if (netif_msg_rx_err(priv))
					netdev_printk(KERN_DEBUG, ndev, "RX Overrun\n");
				ndev->stats.rx_dropped++;
			}
			locked_reg_bfclr(priv, EIR, EIR_RXERIF);
		}
		/* RX handler */
		if (enc28j60_rx_interrupt(ndev))
			loop++;
	} while (loop);

	/* re-enable interrupts */
	locked_reg_bfset(priv, EIE, EIE_INTIE);
}
/*
 * Hardware transmit function.
 * Fill the buffer memory and send the contents of the transmit buffer
 * onto the network
 */
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
	struct net_device *ndev = priv->netdev;

	BUG_ON(!priv->tx_skb);

	if (netif_msg_tx_queued(priv))
		netdev_printk(KERN_DEBUG, ndev, "Tx Packet Len:%d\n",
			      priv->tx_skb->len);

	if (netif_msg_pktdata(priv))
		dump_packet(__func__,
			    priv->tx_skb->len, priv->tx_skb->data);
	enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data);

#ifdef CONFIG_ENC28J60_WRITEVERIFY
	/* readback and verify written data */
	if (netif_msg_drv(priv)) {
		struct device *dev = &priv->spi->dev;
		int test_len, k;
		u8 test_buf[64]; /* limit the test to the first 64 bytes */
		int okflag;

		test_len = priv->tx_skb->len;
		if (test_len > sizeof(test_buf))
			test_len = sizeof(test_buf);

		/* + 1 to skip control byte */
		enc28j60_mem_read(priv, TXSTART_INIT + 1, test_len, test_buf);
		okflag = 1;
		for (k = 0; k < test_len; k++) {
			if (priv->tx_skb->data[k] != test_buf[k]) {
				dev_printk(KERN_DEBUG, dev,
					   "Error, %d location differ: 0x%02x-0x%02x\n",
					   k, priv->tx_skb->data[k], test_buf[k]);
				okflag = 0;
			}
		}
		if (!okflag)
			dev_printk(KERN_DEBUG, dev, "Tx write buffer, verify ERROR!\n");
	}
#endif
	/* set TX request flag */
	locked_reg_bfset(priv, ECON1, ECON1_TXRTS);
}

static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
					struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	/* If some error occurs while trying to transmit this
	 * packet, you should return '1' from this function.
	 * In such a case you _may not_ do anything to the
	 * SKB, it is still owned by the network queueing
	 * layer when an error is returned. This means you
	 * may not modify any SKB fields, you may not free
	 * the SKB, etc.
	 */
	netif_stop_queue(dev);

	/* Remember the skb for deferred processing */
	priv->tx_skb = skb;
	schedule_work(&priv->tx_work);

	return NETDEV_TX_OK;
}

static void enc28j60_tx_work_handler(struct work_struct *work)
{
	struct enc28j60_net *priv =
		container_of(work, struct enc28j60_net, tx_work);

	/* actual delivery of data */
	enc28j60_hw_tx(priv);
}

static irqreturn_t enc28j60_irq(int irq, void *dev_id)
{
	struct enc28j60_net *priv = dev_id;

	/*
	 * Can't do anything in interrupt context because we need to
	 * block (spi_sync() is blocking) so fire off the interrupt
	 * handling workqueue.
	 * Remember that we access enc28j60 registers through SPI bus
	 * via spi_sync() call.
	 */
	schedule_work(&priv->irq_work);

	return IRQ_HANDLED;
}

static void enc28j60_tx_timeout(struct net_device *ndev)
{
	struct enc28j60_net *priv = netdev_priv(ndev);

	if (netif_msg_timer(priv))
		netdev_err(ndev, "tx timeout\n");

	ndev->stats.tx_errors++;
	/* can't restart safely under softirq */
	schedule_work(&priv->restart_work);
}
/*
 * Open/initialize the board. This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 */
static int enc28j60_net_open(struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		if (netif_msg_ifup(priv))
			netdev_err(dev, "invalid MAC address %pM\n", dev->dev_addr);
		return -EADDRNOTAVAIL;
	}
	/* Reset the hardware here (and take it out of low power mode) */
	enc28j60_lowpower(priv, false);
	enc28j60_hw_disable(priv);
	if (!enc28j60_hw_init(priv)) {
		if (netif_msg_ifup(priv))
			netdev_err(dev, "hw_reset() failed\n");
		return -EINVAL;
	}
	/* Update the MAC address (in case user has changed it) */
	enc28j60_set_hw_macaddr(dev);

	/* Enable interrupts */
	enc28j60_hw_enable(priv);
	/* check link status */
	enc28j60_check_link_status(dev);
	/* We are now ready to accept transmit requests from
	 * the queueing layer of the networking.
	 */
	netif_start_queue(dev);

	return 0;
}

/* The inverse routine to net_open(). */
static int enc28j60_net_close(struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	enc28j60_hw_disable(priv);
	enc28j60_lowpower(priv, true);
	netif_stop_queue(dev);

	return 0;
}

/*
 * Set or clear the multicast filter for this adapter
 * num_addrs == -1	Promiscuous mode, receive all packets
 * num_addrs == 0	Normal mode, filter out multicast packets
 * num_addrs > 0	Multicast mode, receive normal and MC packets
 */
static void enc28j60_set_multicast_list(struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);
	int oldfilter = priv->rxfilter;

	if (dev->flags & IFF_PROMISC) {
		if (netif_msg_link(priv))
			netdev_info(dev, "promiscuous mode\n");
		priv->rxfilter = RXFILTER_PROMISC;
	} else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
		if (netif_msg_link(priv))
			netdev_info(dev, "%smulticast mode\n",
				    (dev->flags & IFF_ALLMULTI) ? "all-" : "");
		priv->rxfilter = RXFILTER_MULTI;
	} else {
		if (netif_msg_link(priv))
			netdev_info(dev, "normal mode\n");
		priv->rxfilter = RXFILTER_NORMAL;
	}

	if (oldfilter != priv->rxfilter)
		schedule_work(&priv->setrx_work);
}

static void enc28j60_setrx_work_handler(struct work_struct *work)
{
	struct enc28j60_net *priv =
		container_of(work, struct enc28j60_net, setrx_work);
	struct device *dev = &priv->spi->dev;

	if (priv->rxfilter == RXFILTER_PROMISC) {
		if (netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev, "promiscuous mode\n");
		locked_regb_write(priv, ERXFCON, 0x00);
	} else if (priv->rxfilter == RXFILTER_MULTI) {
		if (netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev, "multicast mode\n");
		locked_regb_write(priv, ERXFCON,
				  ERXFCON_UCEN | ERXFCON_CRCEN |
				  ERXFCON_BCEN | ERXFCON_MCEN);
	} else {
		if (netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev, "normal mode\n");
		locked_regb_write(priv, ERXFCON,
				  ERXFCON_UCEN | ERXFCON_CRCEN |
				  ERXFCON_BCEN);
	}
}

static void enc28j60_restart_work_handler(struct work_struct *work)
{
	struct enc28j60_net *priv =
		container_of(work, struct enc28j60_net, restart_work);
	struct net_device *ndev = priv->netdev;
	int ret;

	rtnl_lock();
	if (netif_running(ndev)) {
		enc28j60_net_close(ndev);
		ret = enc28j60_net_open(ndev);
		if (unlikely(ret)) {
			netdev_info(ndev, "could not restart %d\n", ret);
			dev_close(ndev);
		}
	}
	rtnl_unlock();
}
/* ......................... ETHTOOL SUPPORT ........................... */

static void
enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info,
		dev_name(dev->dev.parent), sizeof(info->bus_info));
}

static int
enc28j60_get_link_ksettings(struct net_device *dev,
			    struct ethtool_link_ksettings *cmd)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	cmd->base.speed = SPEED_10;
	cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.port = PORT_TP;
	cmd->base.autoneg = AUTONEG_DISABLE;

	return 0;
}

static int
enc28j60_set_link_ksettings(struct net_device *dev,
			    const struct ethtool_link_ksettings *cmd)
{
	return enc28j60_setlink(dev, cmd->base.autoneg,
				cmd->base.speed, cmd->base.duplex);
}

static u32 enc28j60_get_msglevel(struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void enc28j60_set_msglevel(struct net_device *dev, u32 val)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	priv->msg_enable = val;
}

static const struct ethtool_ops enc28j60_ethtool_ops = {
	.get_drvinfo = enc28j60_get_drvinfo,
	.get_msglevel = enc28j60_get_msglevel,
	.set_msglevel = enc28j60_set_msglevel,
	.get_link_ksettings = enc28j60_get_link_ksettings,
	.set_link_ksettings = enc28j60_set_link_ksettings,
};

static int enc28j60_chipset_init(struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	return enc28j60_hw_init(priv);
}

static const struct net_device_ops enc28j60_netdev_ops = {
	.ndo_open = enc28j60_net_open,
	.ndo_stop = enc28j60_net_close,
	.ndo_start_xmit = enc28j60_send_packet,
	.ndo_set_rx_mode = enc28j60_set_multicast_list,
	.ndo_set_mac_address = enc28j60_set_mac_address,
	.ndo_tx_timeout = enc28j60_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
};

static int enc28j60_probe(struct spi_device *spi)
{
	unsigned char macaddr[ETH_ALEN];
	struct net_device *dev;
	struct enc28j60_net *priv;
	int ret = 0;

	if (netif_msg_drv(&debug))
		dev_info(&spi->dev, "Ethernet driver %s loaded\n", DRV_VERSION);

	dev = alloc_etherdev(sizeof(struct enc28j60_net));
	if (!dev) {
		ret = -ENOMEM;
		goto error_alloc;
	}
	priv = netdev_priv(dev);

	priv->netdev = dev;	/* priv to netdev reference */
	priv->spi = spi;	/* priv to spi reference */
	priv->msg_enable = netif_msg_init(debug.msg_enable, ENC28J60_MSG_DEFAULT);
	mutex_init(&priv->lock);
	INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler);
	INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler);
	INIT_WORK(&priv->irq_work, enc28j60_irq_work_handler);
	INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler);
	spi_set_drvdata(spi, priv);	/* spi to priv reference */
	SET_NETDEV_DEV(dev, &spi->dev);

	if (!enc28j60_chipset_init(dev)) {
		if (netif_msg_probe(priv))
			dev_info(&spi->dev, "chip not found\n");
		ret = -EIO;
		goto error_irq;
	}

	if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr)))
		ether_addr_copy(dev->dev_addr, macaddr);
	else
		eth_hw_addr_random(dev);
	enc28j60_set_hw_macaddr(dev);

	/* Board setup must set the relevant edge trigger type;
	 * level triggers won't currently work.
	 */
	ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv);
	if (ret < 0) {
		if (netif_msg_probe(priv))
			dev_err(&spi->dev, "request irq %d failed (ret = %d)\n",
				spi->irq, ret);
		goto error_irq;
	}

	dev->if_port = IF_PORT_10BASET;
	dev->irq = spi->irq;
	dev->netdev_ops = &enc28j60_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &enc28j60_ethtool_ops;

	enc28j60_lowpower(priv, true);

	ret = register_netdev(dev);
	if (ret) {
		if (netif_msg_probe(priv))
			dev_err(&spi->dev, "register netdev failed (ret = %d)\n",
				ret);
		goto error_register;
	}

	return 0;

error_register:
	free_irq(spi->irq, priv);
error_irq:
	free_netdev(dev);
error_alloc:
	return ret;
}

static int enc28j60_remove(struct spi_device *spi)
{
	struct enc28j60_net *priv = spi_get_drvdata(spi);

	unregister_netdev(priv->netdev);
	free_irq(spi->irq, priv);
	free_netdev(priv->netdev);

	return 0;
}

static const struct of_device_id enc28j60_dt_ids[] = {
	{ .compatible = "microchip,enc28j60" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, enc28j60_dt_ids);

static struct spi_driver enc28j60_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = enc28j60_dt_ids,
	},
	.probe = enc28j60_probe,
	.remove = enc28j60_remove,
};
module_spi_driver(enc28j60_driver);

MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
MODULE_LICENSE("GPL");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level in amount of bits set (0=none, ..., 31=all)");
MODULE_ALIAS("spi:" DRV_NAME);