/***************************************************************************
 *
 * Copyright (C) 2007-2010 SMSC
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include "smsc75xx.h"

#define SMSC_CHIPNAME "smsc75xx"
#define SMSC_DRIVER_VERSION "1.0.0"
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)
#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
#define DEFAULT_FS_BURST_CAP_SIZE (6 * 1024 + 33 * FS_USB_PKT_SIZE)
#define DEFAULT_BULK_IN_DELAY (0x00002000)
#define MAX_SINGLE_PACKET_SIZE (9000)
#define LAN75XX_EEPROM_MAGIC (0x7500)
#define EEPROM_MAC_OFFSET (0x01)
#define DEFAULT_TX_CSUM_ENABLE (true)
#define DEFAULT_RX_CSUM_ENABLE (true)
#define SMSC75XX_INTERNAL_PHY_ID (1)
#define SMSC75XX_TX_OVERHEAD (8)
#define MAX_RX_FIFO_SIZE (20 * 1024)
#define MAX_TX_FIFO_SIZE (12 * 1024)
#define USB_VENDOR_ID_SMSC (0x0424)
#define USB_PRODUCT_ID_LAN7500 (0x7500)
#define USB_PRODUCT_ID_LAN7505 (0x7505)
#define RXW_PADDING 2

#define check_warn(ret, fmt, args...) \
	({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })

#define check_warn_return(ret, fmt, args...) \
	({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })

#define check_warn_goto_done(ret, fmt, args...) \
	({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })

struct smsc75xx_priv {
	struct usbnet *dev;
	u32 rfe_ctl;
	u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
	struct mutex dataport_mutex;
	spinlock_t rfe_ctl_lock;
	struct work_struct set_multicast;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct usbnet *dev;
};

static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
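
/* Register access helpers: device registers are read and written with USB
 * vendor control transfers; values are little-endian on the wire and are
 * converted to/from host order here.
 */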
static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
					  u32 *data)
{
	u32 *buf = kmalloc(4, GFP_KERNEL);
	int ret;

	BUG_ON(!dev);

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
		USB_VENDOR_REQUEST_READ_REGISTER,
		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		00, index, buf, 4, USB_CTRL_GET_TIMEOUT);

	if (unlikely(ret < 0))
		netdev_warn(dev->net,
			"Failed to read reg index 0x%08x: %d", index, ret);

	le32_to_cpus(buf);
	*data = *buf;
	kfree(buf);

	return ret;
}

static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
					   u32 data)
{
	u32 *buf = kmalloc(4, GFP_KERNEL);
	int ret;

	BUG_ON(!dev);

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
		USB_VENDOR_REQUEST_WRITE_REGISTER,
		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		00, index, buf, 4, USB_CTRL_SET_TIMEOUT);

	if (unlikely(ret < 0))
		netdev_warn(dev->net,
			"Failed to write reg index 0x%08x: %d", index, ret);

	kfree(buf);

	return ret;
}

/* Loop until the read is completed with timeout
 * called with phy_mutex held */
static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = smsc75xx_read_reg(dev, MII_ACCESS, &val);
		check_warn_return(ret, "Error reading MII_ACCESS");

		if (!(val & MII_ACCESS_BUSY))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
{
	struct usbnet *dev = netdev_priv(netdev);
	u32 val, addr;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = smsc75xx_phy_wait_not_busy(dev);
	check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read");

	/* set the address, index & direction (read from PHY) */
	phy_id &= dev->mii.phy_id_mask;
	idx &= dev->mii.reg_num_mask;
	addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
		| ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
		| MII_ACCESS_READ | MII_ACCESS_BUSY;
	ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
	check_warn_goto_done(ret, "Error writing MII_ACCESS");

	ret = smsc75xx_phy_wait_not_busy(dev);
	check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);

	ret = smsc75xx_read_reg(dev, MII_DATA, &val);
	check_warn_goto_done(ret, "Error reading MII_DATA");

	ret = (u16)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	return ret;
}

static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
				int regval)
{
	struct usbnet *dev = netdev_priv(netdev);
	u32 val, addr;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = smsc75xx_phy_wait_not_busy(dev);
	check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write");

	val = regval;
	ret = smsc75xx_write_reg(dev, MII_DATA, val);
	check_warn_goto_done(ret, "Error writing MII_DATA");

	/* set the address, index & direction (write to PHY) */
	phy_id &= dev->mii.phy_id_mask;
	idx &= dev->mii.reg_num_mask;
	addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
		| ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
		| MII_ACCESS_WRITE | MII_ACCESS_BUSY;
	ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
	check_warn_goto_done(ret, "Error writing MII_ACCESS");

	ret = smsc75xx_phy_wait_not_busy(dev);
	check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);

done:
	mutex_unlock(&dev->phy_mutex);
}
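
/* EEPROM access helpers: commands are issued through E2P_CMD and polled
 * until the controller clears E2P_CMD_BUSY (or reports E2P_CMD_TIMEOUT);
 * data is transferred one byte at a time through E2P_DATA.
 */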
static int smsc75xx_wait_eeprom(struct usbnet *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
		check_warn_return(ret, "Error reading E2P_CMD");

		if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT))
			break;
		udelay(40);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
		check_warn_return(ret, "Error reading E2P_CMD");

		if (!(val & E2P_CMD_BUSY))
			return 0;

		udelay(40);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
				u8 *data)
{
	u32 val;
	int i, ret;

	BUG_ON(!dev);
	BUG_ON(!data);

	ret = smsc75xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR);
		ret = smsc75xx_write_reg(dev, E2P_CMD, val);
		check_warn_return(ret, "Error writing E2P_CMD");

		ret = smsc75xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		ret = smsc75xx_read_reg(dev, E2P_DATA, &val);
		check_warn_return(ret, "Error reading E2P_DATA");

		data[i] = val & 0xFF;
		offset++;
	}

	return 0;
}

static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
				 u8 *data)
{
	u32 val;
	int i, ret;

	BUG_ON(!dev);
	BUG_ON(!data);

	ret = smsc75xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_BUSY | E2P_CMD_EWEN;
	ret = smsc75xx_write_reg(dev, E2P_CMD, val);
	check_warn_return(ret, "Error writing E2P_CMD");

	ret = smsc75xx_wait_eeprom(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = smsc75xx_write_reg(dev, E2P_DATA, val);
		check_warn_return(ret, "Error writing E2P_DATA");

		/* Send "write" command */
		val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR);
		ret = smsc75xx_write_reg(dev, E2P_CMD, val);
		check_warn_return(ret, "Error writing E2P_CMD");

		ret = smsc75xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		offset++;
	}

	return 0;
}
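
/* The data port provides indirect access to internal RAM such as the
 * VLAN/multicast hash filter table (DP_SEL_VHF): select the RAM bank in
 * DP_SEL, then issue address/data/command triplets while polling
 * DP_SEL_DPRDY between operations.
 */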
static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;
		ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
		check_warn_return(ret, "Error reading DP_SEL");
		if (dp_sel & DP_SEL_DPRDY)
			return 0;
		udelay(40);
	}

	netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out");
	return -EIO;
}

static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr,
				   u32 length, u32 *buf)
{
	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = smsc75xx_dataport_wait_not_busy(dev);
	check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry");

	ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
	check_warn_goto_done(ret, "Error reading DP_SEL");

	dp_sel &= ~DP_SEL_RSEL;
	dp_sel |= ram_select;

	ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel);
	check_warn_goto_done(ret, "Error writing DP_SEL");

	for (i = 0; i < length; i++) {
		ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i);
		check_warn_goto_done(ret, "Error writing DP_ADDR");

		ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]);
		check_warn_goto_done(ret, "Error writing DP_DATA");

		ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE);
		check_warn_goto_done(ret, "Error writing DP_CMD");

		ret = smsc75xx_dataport_wait_not_busy(dev);
		check_warn_goto_done(ret, "smsc75xx_dataport_write timeout");
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	return ret;
}

/* returns hash bit number for given MAC address */
static u32 smsc75xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void smsc75xx_deferred_multicast_write(struct work_struct *param)
{
	struct smsc75xx_priv *pdata =
		container_of(param, struct smsc75xx_priv, set_multicast);
	struct usbnet *dev = pdata->dev;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x",
		pdata->rfe_ctl);

	smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN,
		DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table);

	ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	check_warn(ret, "Error writing RFE_CTL");
}
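
/* Rebuild the receive filter configuration (promiscuous, all-multicast,
 * multicast hash filter, or unicast only) under rfe_ctl_lock; the actual
 * RFE_CTL and hash table writes are handed off to the set_multicast work
 * item because this callback must not sleep.
 */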
static void smsc75xx_set_multicast(struct net_device *netdev)
{
	struct usbnet *dev = netdev_priv(netdev);
	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &=
		~(RFE_CTL_AU | RFE_CTL_AM | RFE_CTL_DPF | RFE_CTL_MHF);
	pdata->rfe_ctl |= RFE_CTL_AB;

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->multicast_hash_table[i] = 0;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU;
	} else if (dev->net->flags & IFF_ALLMULTI) {
		netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
		pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
	} else if (!netdev_mc_empty(dev->net)) {
		struct netdev_hw_addr *ha;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;

		netdev_for_each_mc_addr(ha, netdev) {
			u32 bitnum = smsc75xx_hash(ha->addr);
			pdata->multicast_hash_table[bitnum / 32] |=
				(1 << (bitnum % 32));
		}
	} else {
		netif_dbg(dev, drv, dev->net, "receive own packets only");
		pdata->rfe_ctl |= RFE_CTL_DPF;
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
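
/* Program pause-frame handling from the resolved flow-control
 * capabilities; flow control is only used on full-duplex links.
 */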
static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex,
					u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;

	if (duplex == DUPLEX_FULL) {
		u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

		if (cap & FLOW_CTRL_TX) {
			flow = (FLOW_TX_FCEN | 0xFFFF);
			/* set fct_flow thresholds to 20% and 80% */
			fct_flow = (8 << 8) | 32;
		}

		if (cap & FLOW_CTRL_RX)
			flow |= FLOW_RX_FCEN;

		netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
			(cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
			(cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
	} else {
		netif_dbg(dev, link, dev->net, "half duplex");
	}

	ret = smsc75xx_write_reg(dev, FLOW, flow);
	check_warn_return(ret, "Error writing FLOW");

	ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow);
	check_warn_return(ret, "Error writing FCT_FLOW");

	return 0;
}

static int smsc75xx_link_reset(struct usbnet *dev)
{
	struct mii_if_info *mii = &dev->mii;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	u16 lcladv, rmtadv;
	int ret;

	/* read and write to clear phy interrupt status */
	ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
	check_warn_return(ret, "Error reading PHY_INT_SRC");
	smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC, 0xffff);

	ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
	check_warn_return(ret, "Error writing INT_STS");

	mii_check_media(mii, 1, 1);
	mii_ethtool_gset(&dev->mii, &ecmd);
	lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
	rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA);

	netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x"
		" rmtadv: %04x", ethtool_cmd_speed(&ecmd),
		ecmd.duplex, lcladv, rmtadv);

	return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
}

static void smsc75xx_status(struct usbnet *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			"unexpected urb length %d", urb->actual_length);
		return;
	}

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	netif_dbg(dev, link, dev->net, "intdata: 0x%08X", intdata);

	if (intdata & INT_ENP_PHY_INT)
		usbnet_defer_kevent(dev, EVENT_LINK_RESET);
	else
		netdev_warn(dev->net,
			"unexpected interrupt, intdata=0x%08X", intdata);
}

static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net)
{
	return MAX_EEPROM_SIZE;
}

static int smsc75xx_ethtool_get_eeprom(struct net_device *netdev,
				       struct ethtool_eeprom *ee, u8 *data)
{
	struct usbnet *dev = netdev_priv(netdev);

	ee->magic = LAN75XX_EEPROM_MAGIC;

	return smsc75xx_read_eeprom(dev, ee->offset, ee->len, data);
}

static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
				       struct ethtool_eeprom *ee, u8 *data)
{
	struct usbnet *dev = netdev_priv(netdev);

	if (ee->magic != LAN75XX_EEPROM_MAGIC) {
		netdev_warn(dev->net,
			"EEPROM: magic value mismatch: 0x%x", ee->magic);
		return -EINVAL;
	}

	return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
}

static const struct ethtool_ops smsc75xx_ethtool_ops = {
	.get_link = usbnet_get_link,
	.nway_reset = usbnet_nway_reset,
	.get_drvinfo = usbnet_get_drvinfo,
	.get_msglevel = usbnet_get_msglevel,
	.set_msglevel = usbnet_set_msglevel,
	.get_settings = usbnet_get_settings,
	.set_settings = usbnet_set_settings,
	.get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
	.get_eeprom = smsc75xx_ethtool_get_eeprom,
	.set_eeprom = smsc75xx_ethtool_set_eeprom,
};

static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	struct usbnet *dev = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}

static void smsc75xx_init_mac_address(struct usbnet *dev)
{
	/* try reading mac address from EEPROM */
	if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
			dev->net->dev_addr) == 0) {
		if (is_valid_ether_addr(dev->net->dev_addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				"MAC address read from EEPROM");
			return;
		}
	}

	/* no eeprom, or eeprom values are invalid. generate random MAC */
	eth_hw_addr_random(dev->net);
	netif_dbg(dev, ifup, dev->net, "MAC address set to eth_hw_addr_random");
}
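
/* Write the MAC address to RX_ADDRH/RX_ADDRL and mirror it into the first
 * ADDR_FILTX filter entry, marked valid with ADDR_FILTX_FB_VALID, so the
 * device accepts unicast frames addressed to it.
 */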
static int smsc75xx_set_mac_address(struct usbnet *dev)
{
	u32 addr_lo = dev->net->dev_addr[0] | dev->net->dev_addr[1] << 8 |
		dev->net->dev_addr[2] << 16 | dev->net->dev_addr[3] << 24;
	u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8;

	int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi);
	check_warn_return(ret, "Failed to write RX_ADDRH: %d", ret);

	ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo);
	check_warn_return(ret, "Failed to write RX_ADDRL: %d", ret);

	addr_hi |= ADDR_FILTX_FB_VALID;

	ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi);
	check_warn_return(ret, "Failed to write ADDR_FILTX: %d", ret);

	ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo);
	check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d", ret);

	return 0;
}
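
/* Set up the MII interface to the internal PHY: reset the PHY, advertise
 * 10/100 speeds plus 1000FULL and pause capabilities, clear any pending
 * PHY interrupt and program the interrupt mask, then restart
 * autonegotiation.
 */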
static int smsc75xx_phy_initialize(struct usbnet *dev)
{
	int bmcr, ret, timeout = 0;

	/* Initialize MII structure */
	dev->mii.dev = dev->net;
	dev->mii.mdio_read = smsc75xx_mdio_read;
	dev->mii.mdio_write = smsc75xx_mdio_write;
	dev->mii.phy_id_mask = 0x1f;
	dev->mii.reg_num_mask = 0x1f;
	dev->mii.supports_gmii = 1;
	dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID;

	/* reset phy and wait for reset to complete */
	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);

	do {
		msleep(10);
		bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
		check_warn_return(bmcr, "Error reading MII_BMCR");
		timeout++;
	} while ((bmcr & BMCR_RESET) && (timeout < 100));

	if (timeout >= 100) {
		netdev_warn(dev->net, "timeout on PHY Reset");
		return -EIO;
	}

	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
		ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
		ADVERTISE_PAUSE_ASYM);
	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
		ADVERTISE_1000FULL);

	/* read and write to clear phy interrupt status */
	ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
	check_warn_return(ret, "Error reading PHY_INT_SRC");
	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff);

	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
		PHY_INT_MASK_DEFAULT);
	mii_nway_restart(&dev->mii);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
	return 0;
}

static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
	check_warn_return(ret, "Failed to read MAC_RX: %d", ret);

	rxenabled = ((buf & MAC_RX_RXEN) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN;
		ret = smsc75xx_write_reg(dev, MAC_RX, buf);
		check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE);

	ret = smsc75xx_write_reg(dev, MAC_RX, buf);
	check_warn_return(ret, "Failed to write MAC_RX: %d", ret);

	if (rxenabled) {
		buf |= MAC_RX_RXEN;
		ret = smsc75xx_write_reg(dev, MAC_RX, buf);
		check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
	}

	return 0;
}

static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct usbnet *dev = netdev_priv(netdev);
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
	check_warn_return(ret, "Failed to set mac rx frame length");

	return usbnet_change_mtu(netdev, new_mtu);
}

/* Enable or disable Rx checksum offload engine */
static int smsc75xx_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct usbnet *dev = netdev_priv(netdev);
	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM)
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM;
	else
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM);

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* note: this write can race with the deferred multicast write of RFE_CTL */
	ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	check_warn_return(ret, "Error writing RFE_CTL");

	return 0;
}
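
/* Bring the chip to a known state: perform a Lite Reset and a PHY reset,
 * program the MAC address, burst/bulk-in parameters and FIFO sizes,
 * configure the LEDs, flow control, receive filter and checksum offload,
 * then enable the TX and RX datapaths.
 */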
static int smsc75xx_reset(struct usbnet *dev)
{
	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0, timeout;

	netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");

	ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
	check_warn_return(ret, "Failed to read HW_CFG: %d", ret);

	buf |= HW_CFG_LRST;

	ret = smsc75xx_write_reg(dev, HW_CFG, buf);
	check_warn_return(ret, "Failed to write HW_CFG: %d", ret);

	timeout = 0;
	do {
		msleep(10);
		ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
		check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
		timeout++;
	} while ((buf & HW_CFG_LRST) && (timeout < 100));

	if (timeout >= 100) {
		netdev_warn(dev->net, "timeout on completion of Lite Reset");
		return -EIO;
	}

	netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY");

	ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
	check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);

	buf |= PMT_CTL_PHY_RST;

	ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
	check_warn_return(ret, "Failed to write PMT_CTL: %d", ret);

	timeout = 0;
	do {
		msleep(10);
		ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
		check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
		timeout++;
	} while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));

	if (timeout >= 100) {
		netdev_warn(dev->net, "timeout waiting for PHY Reset");
		return -EIO;
	}

	netif_dbg(dev, ifup, dev->net, "PHY reset complete");

	smsc75xx_init_mac_address(dev);

	ret = smsc75xx_set_mac_address(dev);
	check_warn_return(ret, "Failed to set mac address");

	netif_dbg(dev, ifup, dev->net, "MAC Address: %pM", dev->net->dev_addr);

	ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
	check_warn_return(ret, "Failed to read HW_CFG: %d", ret);

	netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x", buf);

	buf |= HW_CFG_BIR;

	ret = smsc75xx_write_reg(dev, HW_CFG, buf);
	check_warn_return(ret, "Failed to write HW_CFG: %d", ret);

	ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
	check_warn_return(ret, "Failed to read HW_CFG: %d", ret);

	netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after "
		"writing HW_CFG_BIR: 0x%08x", buf);

	if (!turbo_mode) {
		buf = 0;
		dev->rx_urb_size = MAX_SINGLE_PACKET_SIZE;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_HS_BURST_CAP_SIZE;
	} else {
		buf = DEFAULT_FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
	}

	netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld",
		(ulong)dev->rx_urb_size);

	ret = smsc75xx_write_reg(dev, BURST_CAP, buf);
	check_warn_return(ret, "Failed to write BURST_CAP: %d", ret);

	ret = smsc75xx_read_reg(dev, BURST_CAP, &buf);
	check_warn_return(ret, "Failed to read BURST_CAP: %d", ret);

	netif_dbg(dev, ifup, dev->net,
		"Read Value from BURST_CAP after writing: 0x%08x", buf);

	ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
	check_warn_return(ret, "Failed to write BULK_IN_DLY: %d", ret);

	ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf);
	check_warn_return(ret, "Failed to read BULK_IN_DLY: %d", ret);

	netif_dbg(dev, ifup, dev->net,
		"Read Value from BULK_IN_DLY after writing: 0x%08x", buf);

	if (turbo_mode) {
		ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
		check_warn_return(ret, "Failed to read HW_CFG: %d", ret);

		netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf);

		buf |= (HW_CFG_MEF | HW_CFG_BCE);

		ret = smsc75xx_write_reg(dev, HW_CFG, buf);
		check_warn_return(ret, "Failed to write HW_CFG: %d", ret);

		ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
		check_warn_return(ret, "Failed to read HW_CFG: %d", ret);

		netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf);
	}

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d", ret);

	netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x", buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d", ret);

	netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x", buf);

	ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
	check_warn_return(ret, "Failed to write INT_STS: %d", ret);

	ret = smsc75xx_read_reg(dev, ID_REV, &buf);
	check_warn_return(ret, "Failed to read ID_REV: %d", ret);

	netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf);

	/* Configure GPIO pins as LED outputs */
	ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf);
	check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret);

	buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL);
	buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL;

	ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf);
	check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret);

	ret = smsc75xx_write_reg(dev, FLOW, 0);
	check_warn_return(ret, "Failed to write FLOW: %d", ret);

	ret = smsc75xx_write_reg(dev, FCT_FLOW, 0);
	check_warn_return(ret, "Failed to write FCT_FLOW: %d", ret);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	check_warn_return(ret, "Failed to read RFE_CTL: %d", ret);

	pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF;

	ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	check_warn_return(ret, "Failed to write RFE_CTL: %d", ret);

	ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	check_warn_return(ret, "Failed to read RFE_CTL: %d", ret);

	netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	smsc75xx_set_features(dev->net, dev->net->features);

	smsc75xx_set_multicast(dev->net);

	ret = smsc75xx_phy_initialize(dev);
	check_warn_return(ret, "Failed to initialize PHY: %d", ret);

	ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf);
	check_warn_return(ret, "Failed to read INT_EP_CTL: %d", ret);

	/* enable PHY interrupts */
	buf |= INT_ENP_PHY_INT;

	ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf);
	check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret);

	/* allow mac to detect speed and duplex from phy */
	ret = smsc75xx_read_reg(dev, MAC_CR, &buf);
	check_warn_return(ret, "Failed to read MAC_CR: %d", ret);

	buf |= (MAC_CR_ADD | MAC_CR_ASD);

	ret = smsc75xx_write_reg(dev, MAC_CR, buf);
	check_warn_return(ret, "Failed to write MAC_CR: %d", ret);

	ret = smsc75xx_read_reg(dev, MAC_TX, &buf);
	check_warn_return(ret, "Failed to read MAC_TX: %d", ret);

	buf |= MAC_TX_TXEN;

	ret = smsc75xx_write_reg(dev, MAC_TX, buf);
	check_warn_return(ret, "Failed to write MAC_TX: %d", ret);

	netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x", buf);

	ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf);
	check_warn_return(ret, "Failed to read FCT_TX_CTL: %d", ret);

	buf |= FCT_TX_CTL_EN;

	ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf);
	check_warn_return(ret, "Failed to write FCT_TX_CTL: %d", ret);

	netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf);

	ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
	check_warn_return(ret, "Failed to set max rx frame length");

	ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
	check_warn_return(ret, "Failed to read MAC_RX: %d", ret);

	buf |= MAC_RX_RXEN;

	ret = smsc75xx_write_reg(dev, MAC_RX, buf);
	check_warn_return(ret, "Failed to write MAC_RX: %d", ret);

	netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x", buf);

	ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf);
	check_warn_return(ret, "Failed to read FCT_RX_CTL: %d", ret);

	buf |= FCT_RX_CTL_EN;

	ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf);
	check_warn_return(ret, "Failed to write FCT_RX_CTL: %d", ret);

	netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x", buf);

	netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0");
	return 0;
}

static const struct net_device_ops smsc75xx_netdev_ops = {
	.ndo_open = usbnet_open,
	.ndo_stop = usbnet_stop,
	.ndo_start_xmit = usbnet_start_xmit,
	.ndo_tx_timeout = usbnet_tx_timeout,
	.ndo_change_mtu = smsc75xx_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = smsc75xx_ioctl,
	.ndo_set_rx_mode = smsc75xx_set_multicast,
	.ndo_set_features = smsc75xx_set_features,
};
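
/* Bind: pick up the USB endpoints, allocate the driver private data,
 * advertise checksum offload features and initialise the hardware.
 */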
static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
{
	struct smsc75xx_priv *pdata = NULL;
	int ret;

	printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");

	ret = usbnet_get_endpoints(dev, intf);
	check_warn_return(ret, "usbnet_get_endpoints failed: %d", ret);

	dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv),
		GFP_KERNEL);
	pdata = (struct smsc75xx_priv *)(dev->data[0]);
	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate smsc75xx_priv");
		return -ENOMEM;
	}

	pdata->dev = dev;

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM;

	/* Init all registers */
	ret = smsc75xx_reset(dev);
	check_warn_return(ret, "smsc75xx_reset error %d", ret);

	dev->net->netdev_ops = &smsc75xx_netdev_ops;
	dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
	dev->net->flags |= IFF_MULTICAST;
	dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
	return 0;
}

static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);

	if (pdata) {
		netif_dbg(dev, ifdown, dev->net, "free pdata");
		kfree(pdata);
		pdata = NULL;
		dev->data[0] = 0;
	}
}
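
/* Pass the hardware-computed checksum from RX command word B up the stack,
 * unless RX checksum offload is disabled or the RX_CMD_A_LCSM bit is set.
 */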
static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
				     u32 rx_cmd_a, u32 rx_cmd_b)
{
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_LCSM)) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
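
/* In turbo mode a single bulk-in buffer can carry several Ethernet frames.
 * Each frame is preceded by two little-endian RX command words plus
 * RXW_PADDING bytes, and is followed by padding up to a 4-byte boundary.
 */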
static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	/* This check is no longer done by usbnet */
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		struct sk_buff *ax_skb;
		unsigned char *packet;

		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, 4);

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, 4 + RXW_PADDING);

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
			netif_dbg(dev, rx_err, dev->net,
				"Error rx_cmd_a=0x%08x", rx_cmd_a);
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_dropped++;

			if (rx_cmd_a & RX_CMD_A_FCS)
				dev->net->stats.rx_crc_errors++;
			else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
				dev->net->stats.rx_frame_errors++;
		} else {
			/* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
			if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
				netif_dbg(dev, rx_err, dev->net,
					"size err rx_cmd_a=0x%08x", rx_cmd_a);
				return 0;
			}

			/* last frame in this batch */
			if (skb->len == size) {
				smsc75xx_rx_csum_offload(dev, skb, rx_cmd_a,
					rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			ax_skb = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!ax_skb)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			ax_skb->len = size;
			ax_skb->data = packet;
			skb_set_tail_pointer(ax_skb, size);

			smsc75xx_rx_csum_offload(dev, ax_skb, rx_cmd_a,
				rx_cmd_b);

			skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
			ax_skb->truesize = size + sizeof(struct sk_buff);

			usbnet_skb_return(dev, ax_skb);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	if (unlikely(skb->len < 0)) {
		netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
		return 0;
	}

	return 1;
}
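
/* Prepend the two little-endian TX command words (SMSC75XX_TX_OVERHEAD
 * bytes): word A carries the frame length, FCS-insertion and
 * checksum-offload flags, word B the MSS for LSO frames.
 */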
static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
					 struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;

	if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
		struct sk_buff *skb2 =
			skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE | TX_CMD_A_TPE;

	if (skb_is_gso(skb)) {
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_MSS_MIN);
		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT) & TX_CMD_B_MSS;

		tx_cmd_a |= TX_CMD_A_LSO;
	} else {
		tx_cmd_b = 0;
	}

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);

	return skb;
}

static const struct driver_info smsc75xx_info = {
	.description = "smsc75xx USB 2.0 Gigabit Ethernet",
	.bind = smsc75xx_bind,
	.unbind = smsc75xx_unbind,
	.link_reset = smsc75xx_link_reset,
	.reset = smsc75xx_reset,
	.rx_fixup = smsc75xx_rx_fixup,
	.tx_fixup = smsc75xx_tx_fixup,
	.status = smsc75xx_status,
	.flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
};

static const struct usb_device_id products[] = {
	{
		/* SMSC7500 USB Gigabit Ethernet Device */
		USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7500),
		.driver_info = (unsigned long) &smsc75xx_info,
	},
	{
		/* SMSC7505 USB Gigabit Ethernet Device */
		USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7505),
		.driver_info = (unsigned long) &smsc75xx_info,
	},
	{ },	/* END */
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver smsc75xx_driver = {
	.name = SMSC_CHIPNAME,
	.id_table = products,
	.probe = usbnet_probe,
	.suspend = usbnet_suspend,
	.resume = usbnet_resume,
	.disconnect = usbnet_disconnect,
};

module_usb_driver(smsc75xx_driver);

MODULE_AUTHOR("Nancy Lin");
MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
MODULE_DESCRIPTION("SMSC75XX USB 2.0 Gigabit Ethernet Devices");
MODULE_LICENSE("GPL");