// SPDX-License-Identifier: GPL-2.0+

#include <linux/types.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <net/tcp.h>
#include <net/ip6_checksum.h>

#define NETSEC_REG_SOFT_RST 0x104
#define NETSEC_REG_COM_INIT 0x120

#define NETSEC_REG_TOP_STATUS 0x200
#define NETSEC_IRQ_RX BIT(1)
#define NETSEC_IRQ_TX BIT(0)

#define NETSEC_REG_TOP_INTEN 0x204
#define NETSEC_REG_INTEN_SET 0x234
#define NETSEC_REG_INTEN_CLR 0x238

#define NETSEC_REG_NRM_TX_STATUS 0x400
#define NETSEC_REG_NRM_TX_INTEN 0x404
#define NETSEC_REG_NRM_TX_INTEN_SET 0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
#define NRM_TX_ST_NTOWNR BIT(17)
#define NRM_TX_ST_TR_ERR BIT(16)
#define NRM_TX_ST_TXDONE BIT(15)
#define NRM_TX_ST_TMREXP BIT(14)

#define NETSEC_REG_NRM_RX_STATUS 0x440
#define NETSEC_REG_NRM_RX_INTEN 0x444
#define NETSEC_REG_NRM_RX_INTEN_SET 0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
#define NRM_RX_ST_RC_ERR BIT(16)
#define NRM_RX_ST_PKTCNT BIT(15)
#define NRM_RX_ST_TMREXP BIT(14)

#define NETSEC_REG_PKT_CMD_BUF 0xd0

#define NETSEC_REG_CLK_EN 0x100

#define NETSEC_REG_PKT_CTRL 0x140

#define NETSEC_REG_DMA_TMR_CTRL 0x20c
#define NETSEC_REG_F_TAIKI_MC_VER 0x22c
#define NETSEC_REG_F_TAIKI_VER 0x230
#define NETSEC_REG_DMA_HM_CTRL 0x214
#define NETSEC_REG_DMA_MH_CTRL 0x220
#define NETSEC_REG_ADDR_DIS_CORE 0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c

#define NETSEC_REG_NRM_TX_PKTCNT 0x410
#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418
#define NETSEC_REG_NRM_TX_TMR 0x41c
#define NETSEC_REG_NRM_RX_PKTCNT 0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460
#define NETSEC_REG_NRM_RX_TMR 0x45c

#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448

#define NETSEC_REG_NRM_TX_CONFIG 0x430
#define NETSEC_REG_NRM_RX_CONFIG 0x470

#define MAC_REG_STATUS 0x1024
#define MAC_REG_DATA 0x11c0
#define MAC_REG_CMD 0x11c4
#define MAC_REG_FLOW_TH 0x11cc
#define MAC_REG_INTF_SEL 0x11d4
#define MAC_REG_DESC_INIT 0x11fc
#define MAC_REG_DESC_SOFT_RST 0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500

#define GMAC_REG_MCR 0x0000
#define GMAC_REG_MFFR 0x0004
#define GMAC_REG_GAR 0x0010
#define GMAC_REG_GDR 0x0014
#define GMAC_REG_FCR 0x0018
#define GMAC_REG_BMR 0x1000
#define GMAC_REG_RDLAR 0x100c
#define GMAC_REG_TDLAR 0x1010
#define GMAC_REG_OMR 0x1018

#define MHZ(n) ((n) * 1000 * 1000)

#define NETSEC_TX_SHIFT_OWN_FIELD 31
#define NETSEC_TX_SHIFT_LD_FIELD 30
#define NETSEC_TX_SHIFT_DRID_FIELD 24
#define NETSEC_TX_SHIFT_PT_FIELD 21
#define NETSEC_TX_SHIFT_TDRID_FIELD 16
#define NETSEC_TX_SHIFT_CC_FIELD 15
#define NETSEC_TX_SHIFT_FS_FIELD 9
#define NETSEC_TX_LAST 8
#define NETSEC_TX_SHIFT_CO 7
#define NETSEC_TX_SHIFT_SO 6
#define NETSEC_TX_SHIFT_TRS_FIELD 4

#define NETSEC_RX_PKT_OWN_FIELD 31
#define NETSEC_RX_PKT_LD_FIELD 30
#define NETSEC_RX_PKT_SDRID_FIELD 24
#define NETSEC_RX_PKT_FR_FIELD 23
#define NETSEC_RX_PKT_ER_FIELD 21
#define NETSEC_RX_PKT_ERR_FIELD 16
#define NETSEC_RX_PKT_TDRID_FIELD 12
#define NETSEC_RX_PKT_FS_FIELD 9
#define NETSEC_RX_PKT_LS_FIELD 8
#define NETSEC_RX_PKT_CO_FIELD 6

#define NETSEC_RX_PKT_ERR_MASK 3

#define NETSEC_MAX_TX_PKT_LEN 1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018

#define NETSEC_RING_GMAC 15
#define NETSEC_RING_MAX 2

#define NETSEC_TCP_SEG_LEN_MAX 1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960

#define NETSEC_RX_CKSUM_NOTAVAIL 0
#define NETSEC_RX_CKSUM_OK 1
#define NETSEC_RX_CKSUM_NG 2

#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)

#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)

#define NETSEC_INT_PKTCNT_MAX 2047

#define NETSEC_FLOW_START_TH_MAX 95
#define NETSEC_FLOW_STOP_TH_MAX 95
#define NETSEC_FLOW_PAUSE_TIME_MIN 5

#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f

#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)

#define NETSEC_CLK_EN_REG_DOM_G BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D BIT(0)

#define NETSEC_COM_INIT_REG_DB BIT(2)
#define NETSEC_COM_INIT_REG_CLS BIT(1)
#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
				 NETSEC_COM_INIT_REG_DB)

#define NETSEC_SOFT_RST_REG_RESET 0
#define NETSEC_SOFT_RST_REG_RUN BIT(31)

#define NETSEC_DMA_CTRL_REG_STOP 1
#define MH_CTRL__MODE_TRANS BIT(20)

#define NETSEC_GMAC_CMD_ST_READ 0
#define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY BIT(31)

#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
#define NETSEC_GMAC_BMR_REG_RESET 0x00020181
#define NETSEC_GMAC_BMR_REG_SWR 0x00000001

#define NETSEC_GMAC_OMR_REG_ST BIT(13)
#define NETSEC_GMAC_OMR_REG_SR BIT(1)

#define NETSEC_GMAC_MCR_REG_IBN BIT(30)
#define NETSEC_GMAC_MCR_REG_CST BIT(25)
#define NETSEC_GMAC_MCR_REG_JE BIT(20)
#define NETSEC_MCR_PS BIT(15)
#define NETSEC_GMAC_MCR_REG_FES BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c

#define NETSEC_FCR_RFE BIT(2)
#define NETSEC_FCR_TFE BIT(1)

#define NETSEC_GMAC_GAR_REG_GW BIT(1)
#define NETSEC_GMAC_GAR_REG_GB BIT(0)

#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
#define GMAC_REG_SHIFT_CR_GAR 2

#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5

#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000

#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000

#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
#define NETSEC_REG_DESC_TMR_MODE 4
#define NETSEC_REG_DESC_ENDIAN 0

#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
#define NETSEC_MAC_DESC_INIT_REG_INIT 1

#define NETSEC_EEPROM_MAC_ADDRESS 0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
#define NETSEC_EEPROM_HM_ME_SIZE 0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
#define NETSEC_EEPROM_MH_ME_SIZE 0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
#define NETSEC_EEPROM_PKT_ME_SIZE 0x24

#define DESC_NUM 256
#define DESC_SZ sizeof(struct netsec_de)

#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)

enum ring_id {
	NETSEC_RING_TX = 0,
	NETSEC_RING_RX
};

struct netsec_desc {
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	void *addr;
	u16 len;
};

struct netsec_desc_ring {
	dma_addr_t desc_dma;
	struct netsec_desc *desc;
	void *vaddr;
	u16 pkt_cnt;
	u16 head, tail;
};

struct netsec_priv {
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
	struct net_device *ndev;
	struct device_node *phy_np;
	struct phy_device *phydev;
	struct mii_bus *mii_bus;
	void __iomem *ioaddr;
	void __iomem *eeprom_base;
	struct device *dev;
	struct clk *clk;
	u32 msg_enable;
	u32 freq;
	u32 phy_addr;
	bool rx_cksum_offload_flag;
};

struct netsec_de { /* Netsec Descriptor layout */
	u32 attr;
	u32 data_buf_addr_up;
	u32 data_buf_addr_lw;
	u32 buf_len_info;
};

struct netsec_tx_pkt_ctrl {
	u16 tcp_seg_len;
	bool tcp_seg_offload_flag;
	bool cksum_offload_flag;
};

struct netsec_rx_pkt_info {
	int rx_cksum_result;
	int err_code;
	bool err_flag;
};

static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
{
	writel(val, priv->ioaddr + reg_addr);
}

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
{
	return readl(priv->ioaddr + reg_addr);
}

/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC 1000
#define TIMEOUT_SECONDARY_MS_MAC 100
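
/* Map the MDIO reference clock rate onto the GMAC GAR CR field, which
 * selects the MDC clock divider; the ranges follow the
 * NETSEC_GMAC_GAR_REG_CR_*_MHZ encodings defined above.
 */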
static u32 netsec_clk_type(u32 freq)
{
	if (freq < MHZ(35))
		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
	if (freq < MHZ(60))
		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
	if (freq < MHZ(100))
		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
	if (freq < MHZ(150))
		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
	if (freq < MHZ(250))
		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;

	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
}
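
/* Poll @addr until the bits in @mask clear.  The wait is two-phase: a short
 * busy-spin for the common fast case, then a sleeping poll before giving up
 * with -ETIMEDOUT.
 */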
static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;

	while (--timeout && netsec_read(priv, addr) & mask)
		cpu_relax();
	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	while (--timeout && netsec_read(priv, addr) & mask)
		usleep_range(1000, 2000);

	if (timeout)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}
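
/* GMAC registers are not mapped directly; they are reached through the
 * MAC_REG_CMD/MAC_REG_DATA window.  A write latches the value in
 * MAC_REG_DATA and kicks MAC_REG_CMD; a read kicks MAC_REG_CMD first and
 * fetches the result from MAC_REG_DATA once the BUSY bit clears.
 */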
static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
{
	netsec_write(priv, MAC_REG_DATA, value);
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
	return netsec_wait_while_busy(priv,
				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
}

static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
{
	int ret;

	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
	ret = netsec_wait_while_busy(priv,
				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
	if (ret)
		return ret;

	*read = netsec_read(priv, MAC_REG_DATA);

	return 0;
}

static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
				      u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;
	u32 data;
	int ret;

	do {
		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	do {
		usleep_range(1000, 2000);

		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout && !ret)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}

static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;

	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

	if (phydev->speed != SPEED_1000)
		value |= NETSEC_MCR_PS;

	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		value |= NETSEC_GMAC_MCR_REG_FES;

	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		value |= NETSEC_GMAC_MCR_REG_IBN;

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;

	return 0;
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);

static int netsec_phy_write(struct mii_bus *bus,
			    int phy_addr, int reg, u16 val)
{
	int status;
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_GAR,
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					    NETSEC_GMAC_GAR_REG_GB);

	/* The Developerbox implements an RTL8211E PHY, which has a
	 * compatibility problem with the F_GMAC4: the RTL8211E expects the
	 * MDC clock to keep toggling for several clock cycles with MDIO high
	 * before entering the IDLE state.  To meet this requirement, the
	 * driver issues a dummy read (e.g. of the PHYID1 register at offset
	 * 0x2) right after each write.
	 */
	netsec_phy_read(bus, phy_addr, MII_PHYSID1);

	return status;
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
{
	struct netsec_priv *priv = bus->priv;
	u32 data;
	int ret;

	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					 NETSEC_GMAC_GAR_REG_GB);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
	if (ret)
		return ret;

	return data;
}

/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
				  struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "netsec", sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));
}

static int netsec_et_get_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	*et_coalesce = priv->et_coalesce;

	return 0;
}

static int netsec_et_set_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
		     priv->et_coalesce.tx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
		     priv->et_coalesce.tx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
		     priv->et_coalesce.rx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
		     priv->et_coalesce.rx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);

	return 0;
}

static u32 netsec_et_get_msglevel(struct net_device *dev)
{
	struct netsec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
{
	struct netsec_priv *priv = netdev_priv(dev);

	priv->msg_enable = datum;
}

static const struct ethtool_ops netsec_ethtool_ops = {
	.get_drvinfo		= netsec_et_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= netsec_et_get_coalesce,
	.set_coalesce		= netsec_et_set_coalesce,
	.get_msglevel		= netsec_et_get_msglevel,
	.set_msglevel		= netsec_et_set_msglevel,
};

/************* NETDEV_OPS FOLLOW *************/
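
/* RX buffer allocation depends on whether the device is cache-coherent: on
 * coherent systems the usual IP-aligned skb is fine, otherwise the buffer
 * length is rounded up to a cache-line multiple so that cache maintenance
 * on the DMA mapping cannot clobber neighbouring data.
 */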
static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
					struct netsec_desc *desc)
{
	struct sk_buff *skb;

	if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
		skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
	} else {
		desc->len = L1_CACHE_ALIGN(desc->len);
		skb = netdev_alloc_skb(priv->ndev, desc->len);
	}
	if (!skb)
		return NULL;

	desc->addr = skb->data;
	desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, desc->dma_addr)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	return skb;
}
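
/* Hand a buffer back to the hardware: program the descriptor and set the
 * OWN bit, plus FS/LS for single-buffer frames and LD on the last ring
 * entry so the hardware wraps.  The driver's shadow netsec_desc is updated
 * after a dma_wmb() so the device-visible descriptor is complete first.
 */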
static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
			     const struct netsec_desc *desc,
			     struct sk_buff *skb)
{
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
		   (1 << NETSEC_RX_PKT_FS_FIELD) |
		   (1 << NETSEC_RX_PKT_LS_FIELD);

	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;
	de->attr = attr;
	dma_wmb();

	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
	dring->desc[idx].skb = skb;
}

static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
					struct netsec_desc_ring *dring,
					u16 idx,
					struct netsec_rx_pkt_info *rxpi,
					struct netsec_desc *desc, u16 *len)
{
	struct netsec_de de = {};

	memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);

	*len = de.buf_len_info >> 16;

	rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
	rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
	rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
						NETSEC_RX_PKT_ERR_MASK;
	*desc = dring->desc[idx];
	return desc->skb;
}

static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
					      struct netsec_rx_pkt_info *rxpi,
					      struct netsec_desc *desc,
					      u16 *len)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct sk_buff *tmp_skb, *skb = NULL;
	struct netsec_desc td;
	int tail;

	*rxpi = (struct netsec_rx_pkt_info){};

	td.len = priv->ndev->mtu + 22;

	tmp_skb = netsec_alloc_skb(priv, &td);

	tail = dring->tail;

	if (!tmp_skb) {
		netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
				 dring->desc[tail].skb);
	} else {
		skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
		netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
	}

	/* move tail ahead */
	dring->tail = (dring->tail + 1) % DESC_NUM;

	return skb;
}
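
/* TX completion: NETSEC_REG_NRM_TX_DONE_PKTCNT reports the packets the
 * hardware has finished since the last read (the accumulation into
 * dring->pkt_cnt implies the count resets on read), and the running total
 * is then drained against the NAPI budget.
 */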
static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	unsigned int pkts, bytes;

	dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

	if (dring->pkt_cnt < budget)
		budget = dring->pkt_cnt;

	pkts = 0;
	bytes = 0;

	while (pkts < budget) {
		struct netsec_desc *desc;
		struct netsec_de *entry;
		int tail, eop;

		tail = dring->tail;

		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;

		desc = &dring->desc[tail];
		entry = dring->vaddr + DESC_SZ * tail;

		eop = (entry->attr >> NETSEC_TX_LAST) & 1;

		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
				 DMA_TO_DEVICE);
		if (eop) {
			pkts++;
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);
		}
		*desc = (struct netsec_desc){};
	}
	dring->pkt_cnt -= budget;

	priv->ndev->stats.tx_packets += budget;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, budget, bytes);

	return budget;
}

static int netsec_process_tx(struct netsec_priv *priv, int budget)
{
	struct net_device *ndev = priv->ndev;
	int new, done = 0;

	do {
		new = netsec_clean_tx_dring(priv, budget);
		done += new;
		budget -= new;
	} while (new);

	if (done && netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	return done;
}

static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct net_device *ndev = priv->ndev;
	struct netsec_rx_pkt_info rx_info;
	int done = 0;
	struct netsec_desc desc;
	struct sk_buff *skb;
	u16 len;

	while (done < budget) {
		u16 idx = dring->tail;
		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);

		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
			/* reading the register clears the irq */
			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
			break;
		}

		/* This barrier is needed to keep us from reading
		 * any other fields out of the netsec_de until we have
		 * verified the descriptor has been written back
		 */
		dma_rmb();
		done++;
		skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
		if (unlikely(!skb) || rx_info.err_flag) {
			netif_err(priv, drv, priv->ndev,
				  "%s: rx fail err(%d)\n",
				  __func__, rx_info.err_code);
			ndev->stats.rx_dropped++;
			continue;
		}

		dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
				 DMA_FROM_DEVICE);
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, priv->ndev);

		if (priv->rx_cksum_offload_flag &&
		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += len;
		}
	}

	return done;
}

static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
	struct netsec_priv *priv;
	int tx, rx, done, todo;

	priv = container_of(napi, struct netsec_priv, napi);

	todo = budget;
	do {
		if (!todo)
			break;

		tx = netsec_process_tx(priv, todo);
		todo -= tx;

		if (!todo)
			break;

		rx = netsec_process_rx(priv, todo);
		todo -= rx;
	} while (rx || tx);

	done = budget - todo;

	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->reglock, flags);
		netsec_write(priv, NETSEC_REG_INTEN_SET,
			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
		spin_unlock_irqrestore(&priv->reglock, flags);
	}

	return done;
}
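
/* Build one TX descriptor per packet: the driver maps only the linear skb
 * data (no scatter-gather), so FS and the LAST bit are both set.  TDRID
 * routes the frame to the GMAC ring, and LD marks the final ring entry for
 * hardware wrap-around.
 */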
static void netsec_set_tx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring,
			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
			     const struct netsec_desc *desc,
			     struct sk_buff *skb)
{
	int idx = dring->head;
	struct netsec_de *de;
	u32 attr;

	de = dring->vaddr + (DESC_SZ * idx);

	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
	       (1 << NETSEC_TX_LAST) |
	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
	de->attr = attr;
	dma_wmb();

	dring->desc[idx] = *desc;
	dring->desc[idx].skb = skb;

	/* move head ahead */
	dring->head = (dring->head + 1) % DESC_NUM;
}
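
/* TX entry point.  The ring-occupancy check keeps at least one free slot in
 * reserve.  When TSO is in use, the IP length field is zeroed and the TCP
 * checksum is seeded with the pseudo-header sum so the hardware can fill in
 * both per emitted segment.
 */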
static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	u16 tso_seg_len = 0;
	int filled;

	/* differentiate between full/empty ring */
	if (dring->head >= dring->tail)
		filled = dring->head - dring->tail;
	else
		filled = dring->head + DESC_NUM - dring->tail;

	if (DESC_NUM - filled < 2) { /* if less than 2 available */
		netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
		netif_stop_queue(priv->ndev);
		dma_wmb();
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_ctrl.cksum_offload_flag = true;

	if (skb_is_gso(skb))
		tso_seg_len = skb_shinfo(skb)->gso_size;

	if (tso_seg_len > 0) {
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}

		tx_ctrl.tcp_seg_offload_flag = true;
		tx_ctrl.tcp_seg_len = tso_seg_len;
	}

	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	tx_desc.addr = skb->data;
	tx_desc.len = skb_headlen(skb);

	skb_tx_timestamp(skb);
	netdev_sent_queue(priv->ndev, skb->len);

	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

	return NETDEV_TX_OK;
}

static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	struct netsec_desc *desc;
	u16 idx;

	if (!dring->vaddr || !dring->desc)
		return;

	for (idx = 0; idx < DESC_NUM; idx++) {
		desc = &dring->desc[idx];
		if (!desc->addr)
			continue;

		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
				 id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE);
		dev_kfree_skb(desc->skb);
	}

	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

	dring->head = 0;
	dring->tail = 0;
	dring->pkt_cnt = 0;

	if (id == NETSEC_RING_TX)
		netdev_reset_queue(priv->ndev);
}

static void netsec_free_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	if (dring->vaddr) {
		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
				  dring->vaddr, dring->desc_dma);
		dring->vaddr = NULL;
	}

	kfree(dring->desc);
	dring->desc = NULL;
}

static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	int ret = 0;

	dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
					   &dring->desc_dma, GFP_KERNEL);
	if (!dring->vaddr) {
		ret = -ENOMEM;
		goto err;
	}

	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
	if (!dring->desc) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	netsec_free_dring(priv, id);
	return ret;
}

static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct netsec_desc desc;
	struct sk_buff *skb;
	int n;

	desc.len = priv->ndev->mtu + 22;

	for (n = 0; n < DESC_NUM; n++) {
		skb = netsec_alloc_skb(priv, &desc);
		if (!skb) {
			netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
			return -ENOMEM;
		}
		netsec_set_rx_de(priv, dring, n, &desc, skb);
	}

	return 0;
}
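
/* The engine microcode is not shipped with the driver: the EEPROM region
 * holds the physical address and size of each image (HM, MH and packet
 * engine), and each word is streamed into the corresponding command-buffer
 * register.
 */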
static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
					   u32 addr_h, u32 addr_l, u32 size)
{
	u64 base = (u64)addr_h << 32 | addr_l;
	void __iomem *ucode;
	u32 i;

	ucode = ioremap(base, size * sizeof(u32));
	if (!ucode)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		netsec_write(priv, reg, readl(ucode + i * 4));

	iounmap(ucode);
	return 0;
}

static int netsec_netdev_load_microcode(struct netsec_priv *priv)
{
	u32 addr_h, addr_l, size;
	int err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = 0;
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	return 0;
}
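
/* Bring the whole engine to a known state: stop the DMA engines, soft-reset
 * the core, program the descriptor ring base addresses, optionally reload
 * the microcode, then switch the mode handler from TAIKI to NORMAL mode and
 * wait for the transition (T2N) to complete.
 */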
static int netsec_reset_hardware(struct netsec_priv *priv,
				 bool load_ucode)
{
	u32 value;
	int err;

	/* stop DMA engines */
	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);
		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);

		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)
			cpu_relax();

		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)
			cpu_relax();
	}

	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);

	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
		cpu_relax();

	/* set desc_start addr */
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));

	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));

	/* set normal tx/rx descriptor ring config */
	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);
	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);

	if (load_ucode) {
		err = netsec_netdev_load_microcode(priv);
		if (err) {
			netif_err(priv, probe, priv->ndev,
				  "%s: failed to load microcode (%d)\n",
				  __func__, err);
			return err;
		}
	}

	/* start DMA engines */
	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);

	usleep_range(1000, 2000);

	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
		netif_err(priv, probe, priv->ndev,
			  "microengine start failed\n");
		return -ENXIO;
	}
	netsec_write(priv, NETSEC_REG_TOP_STATUS,
		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);

	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
	if (priv->ndev->mtu > ETH_DATA_LEN)
		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;

	/* change to normal mode */
	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);

	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
		cpu_relax();

	/* clear any pending EMPTY/ERR irq status */
	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);

	/* Disable TX & RX intr */
	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);

	return 0;
}

static int netsec_start_gmac(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;
	int ret;

	if (phydev->speed != SPEED_1000)
		value = (NETSEC_GMAC_MCR_REG_CST |
			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_RESET))
		return -ETIMEDOUT;

	/* Wait soft reset */
	usleep_range(1000, 5000);

	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
	if (ret)
		return ret;
	if (value & NETSEC_GMAC_BMR_REG_SWR)
		return -EAGAIN;

	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
		return -ETIMEDOUT;

	netsec_write(priv, MAC_REG_DESC_INIT, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
		return -ETIMEDOUT;

	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
			     NETSEC_GMAC_RDLAR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
			     NETSEC_GMAC_TDLAR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
		return -ETIMEDOUT;

	ret = netsec_mac_update_to_phy_state(priv);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	if (ret)
		return ret;

	value |= NETSEC_GMAC_OMR_REG_SR;
	value |= NETSEC_GMAC_OMR_REG_ST;

	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);

	if (netsec_mac_write(priv, GMAC_REG_OMR, value))
		return -ETIMEDOUT;

	return 0;
}

static int netsec_stop_gmac(struct netsec_priv *priv)
{
	u32 value;
	int ret;

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	if (ret)
		return ret;
	value &= ~NETSEC_GMAC_OMR_REG_SR;
	value &= ~NETSEC_GMAC_OMR_REG_ST;

	/* disable all interrupts */
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	return netsec_mac_write(priv, GMAC_REG_OMR, value);
}

static void netsec_phy_adjust_link(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	if (ndev->phydev->link)
		netsec_start_gmac(priv);
	else
		netsec_stop_gmac(priv);

	phy_print_status(ndev->phydev);
}
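
/* Top-half interrupt handler: acknowledge the per-queue TX/RX status bits,
 * mask the top-level TX/RX interrupt sources under reglock, and defer the
 * actual work to NAPI.  The sources are unmasked again in netsec_napi_poll()
 * once the budget is no longer exhausted.
 */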
static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
{
	struct netsec_priv *priv = dev_id;
	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
	unsigned long flags;

	/* Disable interrupts */
	if (status & NETSEC_IRQ_TX) {
		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
	}
	if (status & NETSEC_IRQ_RX) {
		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
	}

	spin_lock_irqsave(&priv->reglock, flags);
	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
	spin_unlock_irqrestore(&priv->reglock, flags);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int netsec_netdev_open(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;

	pm_runtime_get_sync(priv->dev);

	ret = netsec_setup_rx_dring(priv);
	if (ret) {
		netif_err(priv, probe, priv->ndev,
			  "%s: fail setup ring\n", __func__);
		goto err1;
	}

	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
			  IRQF_SHARED, "netsec", priv);
	if (ret) {
		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
		goto err2;
	}

	if (dev_of_node(priv->dev)) {
		if (!of_phy_connect(priv->ndev, priv->phy_np,
				    netsec_phy_adjust_link, 0,
				    priv->phy_interface)) {
			netif_err(priv, link, priv->ndev, "missing PHY\n");
			ret = -ENODEV;
			goto err3;
		}
	} else {
		ret = phy_connect_direct(priv->ndev, priv->phydev,
					 netsec_phy_adjust_link,
					 priv->phy_interface);
		if (ret) {
			netif_err(priv, link, priv->ndev,
				  "phy_connect_direct() failed (%d)\n", ret);
			goto err3;
		}
	}

	phy_start(ndev->phydev);

	netsec_start_gmac(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	/* Enable TX+RX intr. */
	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);

	return 0;
err3:
	free_irq(priv->ndev->irq, priv);
err2:
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
err1:
	pm_runtime_put_sync(priv->dev);
	return ret;
}

static int netsec_netdev_stop(struct net_device *ndev)
{
	int ret;
	struct netsec_priv *priv = netdev_priv(ndev);

	netif_stop_queue(priv->ndev);
	dma_wmb();

	napi_disable(&priv->napi);

	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
	netsec_stop_gmac(priv);

	free_irq(priv->ndev->irq, priv);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);

	ret = netsec_reset_hardware(priv, false);

	pm_runtime_put_sync(priv->dev);

	return ret;
}

static int netsec_netdev_init(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;
	u16 data;

	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
	if (ret)
		return ret;

	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
	if (ret)
		goto err1;

	/* set phy power down */
	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
		BMCR_PDOWN;
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);

	ret = netsec_reset_hardware(priv, true);
	if (ret)
		goto err2;

	return 0;
err2:
	netsec_free_dring(priv, NETSEC_RING_RX);
err1:
	netsec_free_dring(priv, NETSEC_RING_TX);
	return ret;
}

static void netsec_netdev_uninit(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);
}

static int netsec_netdev_set_features(struct net_device *ndev,
				      netdev_features_t features)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);

	return 0;
}

static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
			       int cmd)
{
	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}

static const struct net_device_ops netsec_netdev_ops = {
	.ndo_init		= netsec_netdev_init,
	.ndo_uninit		= netsec_netdev_uninit,
	.ndo_open		= netsec_netdev_open,
	.ndo_stop		= netsec_netdev_stop,
	.ndo_start_xmit		= netsec_netdev_start_xmit,
	.ndo_set_features	= netsec_netdev_set_features,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= netsec_netdev_ioctl,
};

static int netsec_of_probe(struct platform_device *pdev,
			   struct netsec_priv *priv, u32 *phy_addr)
{
	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_np) {
		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
		return -EINVAL;
	}

	*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);

	priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "phy_ref_clk not found\n");
		return PTR_ERR(priv->clk);
	}
	priv->freq = clk_get_rate(priv->clk);

	return 0;
}

static int netsec_acpi_probe(struct platform_device *pdev,
			     struct netsec_priv *priv, u32 *phy_addr)
{
	int ret;

	if (!IS_ENABLED(CONFIG_ACPI))
		return -ENODEV;

	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
	if (ret) {
		dev_err(&pdev->dev,
			"missing required property 'phy-channel'\n");
		return ret;
	}

	ret = device_property_read_u32(&pdev->dev,
				       "socionext,phy-clock-frequency",
				       &priv->freq);
	if (ret)
		dev_err(&pdev->dev,
			"missing required property 'socionext,phy-clock-frequency'\n");
	return ret;
}

static void netsec_unregister_mdio(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->phydev;

	if (!dev_of_node(priv->dev) && phydev) {
		phy_device_remove(phydev);
		phy_device_free(phydev);
	}

	mdiobus_unregister(priv->mii_bus);
}
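
/* MDIO bus registration differs by firmware interface: with DT the bus (and
 * optionally its "mdio" subnode) is registered through
 * of_mdiobus_register() and the PHY is taken from the phy-handle; under
 * ACPI, auto-probing is masked off and the single PHY at "phy-channel" is
 * created and registered by hand.
 */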
static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
{
	struct mii_bus *bus;
	int ret;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "SNI NETSEC MDIO";
	bus->read = netsec_phy_read;
	bus->write = netsec_phy_write;
	bus->parent = priv->dev;
	priv->mii_bus = bus;

	if (dev_of_node(priv->dev)) {
		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);

		mdio_node = of_get_child_by_name(parent, "mdio");
		if (mdio_node) {
			parent = mdio_node;
		} else {
			/* older f/w doesn't populate the mdio subnode,
			 * allow relaxed upgrade of f/w in due time.
			 */
			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
		}

		ret = of_mdiobus_register(bus, parent);
		of_node_put(mdio_node);

		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}
	} else {
		/* Mask out all PHYs from auto probing. */
		bus->phy_mask = ~0;
		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}

		priv->phydev = get_phy_device(bus, phy_addr, false);
		if (IS_ERR(priv->phydev)) {
			ret = PTR_ERR(priv->phydev);
			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
			priv->phydev = NULL;
			return -ENODEV;
		}

		ret = phy_device_register(priv->phydev);
		if (ret) {
			mdiobus_unregister(bus);
			dev_err(priv->dev,
				"phy_device_register err(%d)\n", ret);
		}
	}

	return ret;
}

static int netsec_probe(struct platform_device *pdev)
{
	struct resource *mmio_res, *eeprom_res, *irq_res;
	u8 *mac, macbuf[ETH_ALEN];
	struct netsec_priv *priv;
	u32 hw_ver, phy_addr = 0;
	struct net_device *ndev;
	int ret;

	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mmio_res) {
		dev_err(&pdev->dev, "No MMIO resource found.\n");
		return -ENODEV;
	}

	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!eeprom_res) {
		dev_info(&pdev->dev, "No EEPROM resource found.\n");
		return -ENODEV;
	}

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res) {
		dev_err(&pdev->dev, "No IRQ resource found.\n");
		return -ENODEV;
	}

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);

	spin_lock_init(&priv->reglock);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, priv);
	ndev->irq = irq_res->start;
	priv->dev = &pdev->dev;
	priv->ndev = ndev;

	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   NETIF_MSG_LINK | NETIF_MSG_PROBE;

	priv->phy_interface = device_get_phy_mode(&pdev->dev);
	if ((int)priv->phy_interface < 0) {
		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
		ret = -ENODEV;
		goto free_ndev;
	}

	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    resource_size(mmio_res));
	if (!priv->ioaddr) {
		dev_err(&pdev->dev, "devm_ioremap() failed\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
					 resource_size(eeprom_res));
	if (!priv->eeprom_base) {
		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
	if (mac)
		ether_addr_copy(ndev->dev_addr, mac);

	if (priv->eeprom_base &&
	    (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
		void __iomem *macp = priv->eeprom_base +
					NETSEC_EEPROM_MAC_ADDRESS;

		ndev->dev_addr[0] = readb(macp + 3);
		ndev->dev_addr[1] = readb(macp + 2);
		ndev->dev_addr[2] = readb(macp + 1);
		ndev->dev_addr[3] = readb(macp + 0);
		ndev->dev_addr[4] = readb(macp + 7);
		ndev->dev_addr[5] = readb(macp + 6);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);
	}

	if (dev_of_node(&pdev->dev))
		ret = netsec_of_probe(pdev, priv, &phy_addr);
	else
		ret = netsec_acpi_probe(pdev, priv, &phy_addr);
	if (ret)
		goto free_ndev;

	priv->phy_addr = phy_addr;

	if (!priv->freq) {
		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
		ret = -ENODEV;
		goto free_ndev;
	}

	/* default for throughput */
	priv->et_coalesce.rx_coalesce_usecs = 500;
	priv->et_coalesce.rx_max_coalesced_frames = 8;
	priv->et_coalesce.tx_coalesce_usecs = 500;
	priv->et_coalesce.tx_max_coalesced_frames = 8;

	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
				       &ndev->max_mtu);
	if (ret < 0)
		ndev->max_mtu = ETH_DATA_LEN;

	/* runtime_pm coverage just for probe, open/close also cover it */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
	/* this driver only supports F_TAIKI style NETSEC */
	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
		ret = -ENODEV;
		goto pm_disable;
	}

	dev_info(&pdev->dev, "hardware revision %d.%d\n",
		 hw_ver >> 16, hw_ver & 0xffff);

	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);

	ndev->netdev_ops = &netsec_netdev_ops;
	ndev->ethtool_ops = &netsec_ethtool_ops;

	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
			  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features = ndev->features;

	priv->rx_cksum_offload_flag = true;

	ret = netsec_register_mdio(priv, phy_addr);
	if (ret)
		goto unreg_napi;

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "Failed to set DMA mask\n");

	ret = register_netdev(ndev);
	if (ret) {
		netif_err(priv, probe, ndev, "register_netdev() failed\n");
		goto unreg_mii;
	}

	pm_runtime_put_sync(&pdev->dev);

	return 0;

unreg_mii:
	netsec_unregister_mdio(priv);
unreg_napi:
	netif_napi_del(&priv->napi);
pm_disable:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
free_ndev:
	free_netdev(ndev);
	dev_err(&pdev->dev, "init failed\n");

	return ret;
}

static int netsec_remove(struct platform_device *pdev)
{
	struct netsec_priv *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);
	netsec_unregister_mdio(priv);
	netif_napi_del(&priv->napi);
	pm_runtime_disable(&pdev->dev);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM
static int netsec_runtime_suspend(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	netsec_write(priv, NETSEC_REG_CLK_EN, 0);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int netsec_runtime_resume(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	clk_prepare_enable(priv->clk);

	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
					      NETSEC_CLK_EN_REG_DOM_C |
					      NETSEC_CLK_EN_REG_DOM_G);
	return 0;
}
#endif

static const struct dev_pm_ops netsec_pm_ops = {
	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
};

static const struct of_device_id netsec_dt_ids[] = {
	{ .compatible = "socionext,synquacer-netsec" },
	{ }
};
MODULE_DEVICE_TABLE(of, netsec_dt_ids);

#ifdef CONFIG_ACPI
static const struct acpi_device_id netsec_acpi_ids[] = {
	{ "SCX0001" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
#endif

static struct platform_driver netsec_driver = {
	.probe	= netsec_probe,
	.remove	= netsec_remove,
	.driver = {
		.name = "netsec",
		.pm = &netsec_pm_ops,
		.of_match_table = netsec_dt_ids,
		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
	},
};
module_platform_driver(netsec_driver);

MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("NETSEC Ethernet driver");
MODULE_LICENSE("GPL");