lantiq_gswip.c 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Lantiq / Intel GSWIP switch driver for VRX200 SoCs
  4. *
  5. * Copyright (C) 2010 Lantiq Deutschland
  6. * Copyright (C) 2012 John Crispin <john@phrozen.org>
  7. * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
  8. *
  9. * The VLAN and bridge model the GSWIP hardware uses does not directly
  10. * matches the model DSA uses.
  11. *
  12. * The hardware has 64 possible table entries for bridges with one VLAN
  13. * ID, one flow id and a list of ports for each bridge. All entries which
  14. * match the same flow ID are combined in the mac learning table, they
  15. * act as one global bridge.
  16. * The hardware does not support VLAN filter on the port, but on the
  17. * bridge, this driver converts the DSA model to the hardware.
  18. *
  19. * The CPU gets all the exception frames which do not match any forwarding
  20. * rule and the CPU port is also added to all bridges. This makes it possible
  21. * to handle all the special cases easily in software.
  22. * At the initialization the driver allocates one bridge table entry for
  23. * each switch port which is used when the port is used without an
  24. * explicit bridge. This prevents the frames from being forwarded
  25. * between all LAN ports by default.
  26. */
  27. #include <linux/clk.h>
  28. #include <linux/delay.h>
  29. #include <linux/etherdevice.h>
  30. #include <linux/firmware.h>
  31. #include <linux/if_bridge.h>
  32. #include <linux/if_vlan.h>
  33. #include <linux/iopoll.h>
  34. #include <linux/mfd/syscon.h>
  35. #include <linux/module.h>
  36. #include <linux/of_mdio.h>
  37. #include <linux/of_net.h>
  38. #include <linux/of_platform.h>
  39. #include <linux/phy.h>
  40. #include <linux/phylink.h>
  41. #include <linux/platform_device.h>
  42. #include <linux/regmap.h>
  43. #include <linux/reset.h>
  44. #include <net/dsa.h>
  45. #include <dt-bindings/mips/lantiq_rcu_gphy.h>
  46. #include "lantiq_pce.h"
  47. /* GSWIP MDIO Registers */
  48. #define GSWIP_MDIO_GLOB 0x00
  49. #define GSWIP_MDIO_GLOB_ENABLE BIT(15)
  50. #define GSWIP_MDIO_CTRL 0x08
  51. #define GSWIP_MDIO_CTRL_BUSY BIT(12)
  52. #define GSWIP_MDIO_CTRL_RD BIT(11)
  53. #define GSWIP_MDIO_CTRL_WR BIT(10)
  54. #define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
  55. #define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
  56. #define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
  57. #define GSWIP_MDIO_READ 0x09
  58. #define GSWIP_MDIO_WRITE 0x0A
  59. #define GSWIP_MDIO_MDC_CFG0 0x0B
  60. #define GSWIP_MDIO_MDC_CFG1 0x0C
  61. #define GSWIP_MDIO_PHYp(p) (0x15 - (p))
  62. #define GSWIP_MDIO_PHY_LINK_MASK 0x6000
  63. #define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
  64. #define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
  65. #define GSWIP_MDIO_PHY_LINK_UP 0x2000
  66. #define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
  67. #define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
  68. #define GSWIP_MDIO_PHY_SPEED_M10 0x0000
  69. #define GSWIP_MDIO_PHY_SPEED_M100 0x0800
  70. #define GSWIP_MDIO_PHY_SPEED_G1 0x1000
  71. #define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
  72. #define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
  73. #define GSWIP_MDIO_PHY_FDUP_EN 0x0200
  74. #define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
  75. #define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
  76. #define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
  77. #define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
  78. #define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
  79. #define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
  80. #define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
  81. #define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
  82. #define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
  83. #define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
  84. #define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
  85. GSWIP_MDIO_PHY_FCONRX_MASK | \
  86. GSWIP_MDIO_PHY_FCONTX_MASK | \
  87. GSWIP_MDIO_PHY_LINK_MASK | \
  88. GSWIP_MDIO_PHY_SPEED_MASK | \
  89. GSWIP_MDIO_PHY_FDUP_MASK)
  90. /* GSWIP MII Registers */
  91. #define GSWIP_MII_CFGp(p) (0x2 * (p))
  92. #define GSWIP_MII_CFG_RESET BIT(15)
  93. #define GSWIP_MII_CFG_EN BIT(14)
  94. #define GSWIP_MII_CFG_ISOLATE BIT(13)
  95. #define GSWIP_MII_CFG_LDCLKDIS BIT(12)
  96. #define GSWIP_MII_CFG_RGMII_IBS BIT(8)
  97. #define GSWIP_MII_CFG_RMII_CLK BIT(7)
  98. #define GSWIP_MII_CFG_MODE_MIIP 0x0
  99. #define GSWIP_MII_CFG_MODE_MIIM 0x1
  100. #define GSWIP_MII_CFG_MODE_RMIIP 0x2
  101. #define GSWIP_MII_CFG_MODE_RMIIM 0x3
  102. #define GSWIP_MII_CFG_MODE_RGMII 0x4
  103. #define GSWIP_MII_CFG_MODE_MASK 0xf
  104. #define GSWIP_MII_CFG_RATE_M2P5 0x00
  105. #define GSWIP_MII_CFG_RATE_M25 0x10
  106. #define GSWIP_MII_CFG_RATE_M125 0x20
  107. #define GSWIP_MII_CFG_RATE_M50 0x30
  108. #define GSWIP_MII_CFG_RATE_AUTO 0x40
  109. #define GSWIP_MII_CFG_RATE_MASK 0x70
  110. #define GSWIP_MII_PCDU0 0x01
  111. #define GSWIP_MII_PCDU1 0x03
  112. #define GSWIP_MII_PCDU5 0x05
  113. #define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
  114. #define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
  115. /* GSWIP Core Registers */
  116. #define GSWIP_SWRES 0x000
  117. #define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
  118. #define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
  119. #define GSWIP_VERSION 0x013
  120. #define GSWIP_VERSION_REV_SHIFT 0
  121. #define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
  122. #define GSWIP_VERSION_MOD_SHIFT 8
  123. #define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
  124. #define GSWIP_VERSION_2_0 0x100
  125. #define GSWIP_VERSION_2_1 0x021
  126. #define GSWIP_VERSION_2_2 0x122
  127. #define GSWIP_VERSION_2_2_ETC 0x022
  128. #define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
  129. #define GSWIP_BM_RAM_ADDR 0x044
  130. #define GSWIP_BM_RAM_CTRL 0x045
  131. #define GSWIP_BM_RAM_CTRL_BAS BIT(15)
  132. #define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
  133. #define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
  134. #define GSWIP_BM_QUEUE_GCTRL 0x04A
  135. #define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
/* buffer management Port Configuration Register */
#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
/* buffer management Port Control Register */
#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
  144. /* PCE */
  145. #define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
  146. #define GSWIP_PCE_TBL_MASK 0x448
  147. #define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
  148. #define GSWIP_PCE_TBL_ADDR 0x44E
  149. #define GSWIP_PCE_TBL_CTRL 0x44F
  150. #define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
  151. #define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
  152. #define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
  153. #define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
  154. #define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
  155. #define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
  156. #define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
  157. #define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
  158. #define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
  159. #define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
  160. #define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
  161. #define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
  162. #define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
  163. #define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
  164. #define GSWIP_PCE_GCTRL_0 0x456
  165. #define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
  166. #define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
  167. #define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
  168. #define GSWIP_PCE_GCTRL_1 0x457
  169. #define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
  170. #define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
  171. #define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
  172. #define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
  173. #define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
  174. #define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
  175. #define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
  176. #define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
  177. #define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
  178. #define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
  179. #define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
  180. #define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
  181. #define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
  182. #define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
  183. #define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
  184. #define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
  185. #define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
  186. #define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
  187. #define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
  188. #define GSWIP_MAC_FLEN 0x8C5
  189. #define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
  190. #define GSWIP_MAC_CTRL_0_PADEN BIT(8)
  191. #define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
  192. #define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
  193. #define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
  194. #define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
  195. #define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
  196. #define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
  197. #define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
  198. #define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
  199. #define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
  200. #define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
  201. #define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
  202. #define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
  203. #define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
  204. #define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
  205. #define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
  208. /* Ethernet Switch Fetch DMA Port Control Register */
  209. #define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
  210. #define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
  211. #define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
  212. #define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
  213. #define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
  214. #define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
  215. #define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
  216. #define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
  217. #define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
  218. /* Ethernet Switch Store DMA Port Control Register */
  219. #define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
  220. #define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
  221. #define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
  222. #define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
  223. #define GSWIP_TABLE_ACTIVE_VLAN 0x01
  224. #define GSWIP_TABLE_VLAN_MAPPING 0x02
  225. #define GSWIP_TABLE_MAC_BRIDGE 0x0b
  226. #define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static not, aging entry */
  227. #define XRX200_GPHY_FW_ALIGN (16 * 1024)
/* Per-SoC switch parameters selected at probe time. */
struct gswip_hw_info {
	int max_ports;	/* number of switch ports, including the CPU port */
	int cpu_port;	/* index of the port connected to the CPU */
};
/* Firmware image names for the embedded GPHYs of one SoC family. */
struct xway_gphy_match_data {
	char *fe_firmware_name;	/* Fast Ethernet (100M) PHY firmware image */
	char *ge_firmware_name;	/* Gigabit Ethernet PHY firmware image */
};
/* State needed to boot one embedded GPHY with its firmware. */
struct gswip_gphy_fw {
	struct clk *clk_gate;		/* clock gate of the GPHY */
	struct reset_control *reset;	/* reset line of the GPHY */
	u32 fw_addr_offset;		/* assumed: offset applied to the firmware load address — confirm against the loader */
	char *fw_name;			/* firmware image file name */
};
/* Shadow of one hardware bridge/VLAN table entry (see file header:
 * the hardware has 64 such entries).
 */
struct gswip_vlan {
	struct net_device *bridge;	/* owning bridge; presumably NULL when the slot is unused — verify */
	u16 vid;			/* VLAN ID of this entry */
	u8 fid;				/* flow ID; entries sharing a flow ID act as one bridge */
};
/* Private driver state, one instance per switch. */
struct gswip_priv {
	__iomem void *gswip;	/* switch core register base */
	__iomem void *mdio;	/* MDIO controller register base */
	__iomem void *mii;	/* xMII port configuration register base */
	const struct gswip_hw_info *hw_info;
	const struct xway_gphy_match_data *gphy_fw_name_cfg;	/* GPHY firmware names for this SoC */
	struct dsa_switch *ds;
	struct device *dev;
	struct regmap *rcu_regmap;	/* NOTE(review): presumably the RCU syscon used for GPHY boot — confirm */
	struct gswip_vlan vlans[64];	/* shadow of the 64 hardware bridge/VLAN entries */
	int num_gphy_fw;		/* number of entries in gphy_fw[] */
	struct gswip_gphy_fw *gphy_fw;
	u32 port_vlan_filter;	/* NOTE(review): looks like a per-port VLAN-filtering bitmap — confirm against users */
};
/* In-memory image of one indirectly accessed PCE table entry. */
struct gswip_pce_table_entry {
	u16 index;	// PCE_TBL_ADDR.ADDR = pData->table_index
	u16 table;	// PCE_TBL_CTRL.ADDR = pData->table
	u16 key[8];	/* GSWIP_PCE_TBL_KEY(0..7) */
	u16 val[5];	/* GSWIP_PCE_TBL_VAL(0..4) */
	u16 mask;	/* GSWIP_PCE_TBL_MASK */
	u8 gmap;	/* group map, PCE_TBL_CTRL bits 10:7 */
	bool type;	/* PCE_TBL_CTRL.TYPE */
	bool valid;	/* PCE_TBL_CTRL.VLD */
	bool key_mode;	/* true: key-based search opcode, false: address-based opcode */
};
/* Descriptor of one RMON counter exposed as an ethtool statistic. */
struct gswip_rmon_cnt_desc {
	unsigned int size;	/* counter width in 32-bit words (1 or 2 in this driver) */
	unsigned int offset;	/* counter address; presumably a BM RAM offset — verify against readout code */
	const char *name;	/* ethtool statistic name */
};
#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
/* RMON counter table: per-port hardware MIB counters, in the order they are
 * reported to ethtool. Entries with size 2 are 64-bit byte counters.
 */
static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
	/** Receive Packet Count (only packets that are accepted and not discarded). */
	MIB_DESC(1, 0x1F, "RxGoodPkts"),
	MIB_DESC(1, 0x23, "RxUnicastPkts"),
	MIB_DESC(1, 0x22, "RxMulticastPkts"),
	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
	MIB_DESC(1, 0x12, "Rx64BytePkts"),
	MIB_DESC(1, 0x13, "Rx127BytePkts"),
	MIB_DESC(1, 0x14, "Rx255BytePkts"),
	MIB_DESC(1, 0x15, "Rx511BytePkts"),
	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
	MIB_DESC(1, 0x18, "RxDroppedPkts"),
	MIB_DESC(1, 0x19, "RxFilteredPkts"),
	MIB_DESC(2, 0x24, "RxGoodBytes"),
	MIB_DESC(2, 0x26, "RxBadBytes"),
	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
	MIB_DESC(1, 0x0C, "TxGoodPkts"),
	MIB_DESC(1, 0x06, "TxUnicastPkts"),
	MIB_DESC(1, 0x07, "TxMulticastPkts"),
	MIB_DESC(1, 0x00, "Tx64BytePkts"),
	MIB_DESC(1, 0x01, "Tx127BytePkts"),
	MIB_DESC(1, 0x02, "Tx255BytePkts"),
	MIB_DESC(1, 0x03, "Tx511BytePkts"),
	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
	MIB_DESC(1, 0x08, "TxSingleCollCount"),
	MIB_DESC(1, 0x09, "TxMultCollCount"),
	MIB_DESC(1, 0x0A, "TxLateCollCount"),
	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
	MIB_DESC(1, 0x0D, "TxPauseCount"),
	MIB_DESC(1, 0x10, "TxDroppedPkts"),
	MIB_DESC(2, 0x0E, "TxGoodBytes"),
};
  320. static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
  321. {
  322. return __raw_readl(priv->gswip + (offset * 4));
  323. }
  324. static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
  325. {
  326. __raw_writel(val, priv->gswip + (offset * 4));
  327. }
  328. static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
  329. u32 offset)
  330. {
  331. u32 val = gswip_switch_r(priv, offset);
  332. val &= ~(clear);
  333. val |= set;
  334. gswip_switch_w(priv, val, offset);
  335. }
  336. static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
  337. u32 cleared)
  338. {
  339. u32 val;
  340. return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
  341. (val & cleared) == 0, 20, 50000);
  342. }
  343. static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
  344. {
  345. return __raw_readl(priv->mdio + (offset * 4));
  346. }
  347. static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
  348. {
  349. __raw_writel(val, priv->mdio + (offset * 4));
  350. }
  351. static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
  352. u32 offset)
  353. {
  354. u32 val = gswip_mdio_r(priv, offset);
  355. val &= ~(clear);
  356. val |= set;
  357. gswip_mdio_w(priv, val, offset);
  358. }
  359. static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
  360. {
  361. return __raw_readl(priv->mii + (offset * 4));
  362. }
  363. static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
  364. {
  365. __raw_writel(val, priv->mii + (offset * 4));
  366. }
  367. static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
  368. u32 offset)
  369. {
  370. u32 val = gswip_mii_r(priv, offset);
  371. val &= ~(clear);
  372. val |= set;
  373. gswip_mii_w(priv, val, offset);
  374. }
  375. static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
  376. int port)
  377. {
  378. /* There's no MII_CFG register for the CPU port */
  379. if (!dsa_is_cpu_port(priv->ds, port))
  380. gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
  381. }
  382. static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
  383. int port)
  384. {
  385. switch (port) {
  386. case 0:
  387. gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
  388. break;
  389. case 1:
  390. gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
  391. break;
  392. case 5:
  393. gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
  394. break;
  395. }
  396. }
  397. static int gswip_mdio_poll(struct gswip_priv *priv)
  398. {
  399. int cnt = 100;
  400. while (likely(cnt--)) {
  401. u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
  402. if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
  403. return 0;
  404. usleep_range(20, 40);
  405. }
  406. return -ETIMEDOUT;
  407. }
/* MDIO bus .write op: write @val to register @reg of the PHY at @addr.
 *
 * Waits for any previous MDIO transaction to finish before starting this
 * one; the write itself completes asynchronously (the next transaction
 * polls the BUSY bit first).
 *
 * Returns 0 on success or -ETIMEDOUT if the bus stays busy.
 */
static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	/* Stage the data word, then trigger the write via the control register */
	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	return 0;
}
/* MDIO bus .read op: read register @reg of the PHY at @addr.
 *
 * Waits for the bus to be idle, triggers the read, waits for it to
 * complete, then fetches the result from the data register.
 *
 * Returns the (non-negative) register value or -ETIMEDOUT.
 */
static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	/* Trigger the read via the control register */
	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	/* Wait for the transaction to finish before reading the result */
	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
}
/* Allocate and register the MDIO bus DSA uses to reach the PHYs behind the
 * GSWIP-internal MDIO controller.
 *
 * @mdio_np: device tree node describing the MDIO bus.
 * Returns 0 on success or a negative error code.
 */
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
	struct dsa_switch *ds = priv->ds;

	ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
	if (!ds->slave_mii_bus)
		return -ENOMEM;

	ds->slave_mii_bus->priv = priv;
	ds->slave_mii_bus->read = gswip_mdio_rd;
	ds->slave_mii_bus->write = gswip_mdio_wr;
	ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
		 dev_name(priv->dev));
	ds->slave_mii_bus->parent = priv->dev;
	/* Only probe the PHY addresses DSA knows about */
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;

	return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
}
/* Read one Parser and Classification Engine (PCE) table entry into @tbl.
 *
 * The tables are accessed indirectly: the entry index is written to
 * PCE_TBL_ADDR, the table id plus the read opcode and the BAS (start/busy)
 * bit go into PCE_TBL_CTRL, and once BAS clears the key/value/mask words
 * can be read back from the data registers.
 *
 * Returns 0 on success or a negative error code on hardware timeout.
 */
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
				      struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u16 crtl;
	/* key-based search vs. address-based read opcode */
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;

	/* Wait for any previous table operation to finish */
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
	/* Select table and opcode, then kick off the access with BAS */
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
			  GSWIP_PCE_TBL_CTRL);

	/* Wait for the read to complete */
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
		tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
		tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
	tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);

	/* Decode the status bits latched in the control register */
	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
	tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;

	return 0;
}
/* Write one Parser and Classification Engine (PCE) table entry from @tbl.
 *
 * Mirrors gswip_pce_table_entry_read(): the index, table id and opcode are
 * programmed first, the key/value/mask words are loaded into the data
 * registers, and the operation is finally started by setting BAS together
 * with the TYPE/VLD/GMAP bits.
 *
 * Returns 0 on success or a negative error code on hardware timeout.
 */
static int gswip_pce_table_entry_write(struct gswip_priv *priv,
				       struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u16 crtl;
	/* key-based search vs. address-based write opcode */
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;

	/* Wait for any previous table operation to finish */
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		return err;

	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
	/* Select table and opcode (BAS is set last, below) */
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode,
			  GSWIP_PCE_TBL_CTRL);

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
		gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
		gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));

	/* Re-assert table and opcode before loading the mask */
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
				GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  tbl->table | addr_mode,
			  GSWIP_PCE_TBL_CTRL);
	gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);

	/* Compose the final control word: TYPE/VLD/GMAP plus BAS to start */
	crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
	crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
		  GSWIP_PCE_TBL_CTRL_GMAP_MASK);
	if (tbl->type)
		crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
	if (tbl->valid)
		crtl |= GSWIP_PCE_TBL_CTRL_VLD;
	crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
	gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);

	/* Wait for the write to complete */
	return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				      GSWIP_PCE_TBL_CTRL_BAS);
}
/* Add the LAN port into a bridge with the CPU port by
 * default. This prevents automatic forwarding of
 * packages between the LAN ports when no explicit
 * bridge is configured.
 *
 * @add: true to install the entry, false to invalidate it.
 * Returns 0 on success or a negative error code.
 */
static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int cpu_port = priv->hw_info->cpu_port;
	unsigned int max_ports = priv->hw_info->max_ports;
	int err;

	if (port >= max_ports) {
		dev_err(priv->dev, "single port for %i supported\n", port);
		return -EIO;
	}

	/* One active-VLAN entry per port, starting at table index 1 */
	vlan_active.index = port + 1;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = 0; /* vid */
	vlan_active.val[0] = port + 1 /* fid */;
	vlan_active.valid = add;
	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	/* Invalidating the active-VLAN entry is sufficient on removal */
	if (!add)
		return 0;

	/* Map the VLAN to this port plus the CPU port only */
	vlan_mapping.index = port + 1;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	vlan_mapping.val[0] = 0 /* vid */;
	vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
	vlan_mapping.val[2] = 0;
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	return 0;
}
/* dsa_switch_ops .port_enable: bring up a user port.
 *
 * Installs the port's single-port bridge entry, enables RMON counting and
 * the fetch/store DMA, and programs the PHY address for this port into the
 * MDIO PHY register (used by the hardware link polling, see
 * GSWIP_MDIO_PHY_LINK_AUTO).
 */
static int gswip_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;
	int err;

	if (!dsa_is_user_port(ds, port))
		return 0;

	/* NOTE(review): this condition is always true here because the CPU
	 * port is never a user port (we returned above) — candidate cleanup.
	 */
	if (!dsa_is_cpu_port(ds, port)) {
		err = gswip_add_single_port_br(priv, port, true);
		if (err)
			return err;
	}

	/* RMON Counter Enable for port */
	gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));

	/* enable port fetch/store dma & VLAN Modification */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
				   GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));

	if (!dsa_is_cpu_port(ds, port)) {
		u32 mdio_phy = 0;

		if (phydev)
			mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;

		/* Latch the attached PHY's MDIO address for this port */
		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
				GSWIP_MDIO_PHYp(port));
	}

	return 0;
}
/* DSA .port_disable callback: stop the fetch (TX) and store (RX) DMA of
 * a user port. Non-user ports are left untouched.
 */
static void gswip_port_disable(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;

	if (!dsa_is_user_port(ds, port))
		return;

	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
			  GSWIP_SDMA_PCTRLp(port));
}
/* Load the PCE (Packet Classification Engine) microcode into the switch.
 *
 * Each 4-word microcode row is written through the PCE table registers
 * in address-based write mode; after every row the busy bit must clear
 * before the next one is started. Finally the microcode is marked valid.
 *
 * Returns 0 on success or a negative error code on timeout.
 */
static int gswip_pce_load_microcode(struct gswip_priv *priv)
{
	int i;
	int err;

	/* Select address-based write access to the microcode table. */
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);

	for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
		gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
			       GSWIP_PCE_TBL_VAL(0));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
			       GSWIP_PCE_TBL_VAL(1));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
			       GSWIP_PCE_TBL_VAL(2));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
			       GSWIP_PCE_TBL_VAL(3));

		/* start the table access: */
		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
				  GSWIP_PCE_TBL_CTRL);
		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
					     GSWIP_PCE_TBL_CTRL_BAS);
		if (err)
			return err;
	}

	/* tell the switch that the microcode is loaded */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
			  GSWIP_PCE_GCTRL_0);

	return 0;
}
/* DSA .port_vlan_filtering callback: toggle VLAN awareness of @port.
 *
 * Refuses to change the mode while the port is enslaved to a bridge —
 * that would require reprogramming the bridge's table entries.
 */
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;

	/* Do not allow changing the VLAN filtering options while in bridge */
	if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering && bridge)
		return -EIO;

	if (vlan_filtering) {
		/* Use tag based VLAN */
		gswip_switch_mask(priv,
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR,
				  GSWIP_PCE_VCTRL(port));
		gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
				  GSWIP_PCE_PCTRL_0p(port));
	} else {
		/* Use port based VLAN tag */
		gswip_switch_mask(priv,
				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR,
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL(port));
		gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
				  GSWIP_PCE_PCTRL_0p(port));
	}

	return 0;
}
/* DSA .setup callback: reset the switch and bring it to a sane baseline
 * (microcode loaded, unknown-destination flooding restricted to the CPU
 * port, special tagging enabled, MAC table flushed).
 */
static int gswip_setup(struct dsa_switch *ds)
{
	struct gswip_priv *priv = ds->priv;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int i;
	int err;

	/* Trigger a full switch reset and give the hardware time to settle. */
	gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
	usleep_range(5000, 10000);
	gswip_switch_w(priv, 0, GSWIP_SWRES);

	/* disable port fetch/store dma on all ports
	 * NOTE(review): gswip_port_disable() returns early for non-user
	 * ports, so the CPU port is actually left untouched here — confirm
	 * that relying on its reset defaults is intended.
	 */
	for (i = 0; i < priv->hw_info->max_ports; i++) {
		gswip_port_disable(ds, i);
		gswip_port_vlan_filtering(ds, i, false);
	}

	/* enable Switch */
	gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);

	err = gswip_pce_load_microcode(priv);
	if (err) {
		dev_err(priv->dev, "writing PCE microcode failed, %i", err);
		return err;
	}

	/* Default unknown Broadcast/Multicast/Unicast port maps: flood
	 * only towards the CPU port.
	 */
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);

	/* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
	 * interoperability problem with this auto polling mechanism because
	 * their status registers think that the link is in a different state
	 * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
	 * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
	 * auto polling state machine consider the link being negotiated with
	 * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
	 * to the switch port being completely dead (RX and TX are both not
	 * working).
	 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
	 * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
	 * it would work fine for a few minutes to hours and then stop; on
	 * other devices no traffic could be sent or received at all.
	 * Testing shows that when PHY auto polling is disabled these problems
	 * go away.
	 */
	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);

	/* Configure the MDIO Clock 2.5 MHz */
	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);

	/* Disable the xMII interface and clear its isolation bit */
	for (i = 0; i < priv->hw_info->max_ports; i++)
		gswip_mii_mask_cfg(priv,
				   GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
				   0, i);

	/* enable special tag insertion on cpu port */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
			  GSWIP_FDMA_PCTRLp(cpu_port));

	/* accept special tag in ingress direction */
	gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
			  GSWIP_PCE_PCTRL_0p(cpu_port));

	/* Raise the frame length limit to make room for the 8 byte
	 * special tag on the CPU port.
	 */
	gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
			  GSWIP_MAC_CTRL_2p(cpu_port));
	gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
		       GSWIP_MAC_FLEN);
	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
			  GSWIP_BM_QUEUE_GCTRL);

	/* VLAN aware Switching */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);

	/* Flush MAC Table */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
				     GSWIP_PCE_GCTRL_0_MTFL);
	if (err) {
		dev_err(priv->dev, "MAC flushing didn't finish\n");
		return err;
	}

	/* NOTE(review): this call is currently a no-op because
	 * gswip_port_enable() ignores non-user ports — see above.
	 */
	gswip_port_enable(ds, cpu_port, NULL);

	return 0;
}
/* DSA .get_tag_protocol callback: GSWIP uses its own special tag format. */
static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
						    int port)
{
	return DSA_TAG_PROTO_GSWIP;
}
  749. static int gswip_vlan_active_create(struct gswip_priv *priv,
  750. struct net_device *bridge,
  751. int fid, u16 vid)
  752. {
  753. struct gswip_pce_table_entry vlan_active = {0,};
  754. unsigned int max_ports = priv->hw_info->max_ports;
  755. int idx = -1;
  756. int err;
  757. int i;
  758. /* Look for a free slot */
  759. for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
  760. if (!priv->vlans[i].bridge) {
  761. idx = i;
  762. break;
  763. }
  764. }
  765. if (idx == -1)
  766. return -ENOSPC;
  767. if (fid == -1)
  768. fid = idx;
  769. vlan_active.index = idx;
  770. vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
  771. vlan_active.key[0] = vid;
  772. vlan_active.val[0] = fid;
  773. vlan_active.valid = true;
  774. err = gswip_pce_table_entry_write(priv, &vlan_active);
  775. if (err) {
  776. dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
  777. return err;
  778. }
  779. priv->vlans[idx].bridge = bridge;
  780. priv->vlans[idx].vid = vid;
  781. priv->vlans[idx].fid = fid;
  782. return idx;
  783. }
  784. static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
  785. {
  786. struct gswip_pce_table_entry vlan_active = {0,};
  787. int err;
  788. vlan_active.index = idx;
  789. vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
  790. vlan_active.valid = false;
  791. err = gswip_pce_table_entry_write(priv, &vlan_active);
  792. if (err)
  793. dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
  794. priv->vlans[idx].bridge = NULL;
  795. return err;
  796. }
/* Add @port to the VLAN-unaware bridge @bridge.
 *
 * All ports of a VLAN-unaware bridge share one VLAN mapping entry
 * (VID 0) that forwards between the member ports and the CPU port.
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_vlan_add_unaware(struct gswip_priv *priv,
				  struct net_device *bridge, int port)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	bool active_vlan_created = false;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			idx = i;
			break;
		}
	}

	/* If this bridge is not programmed yet, add a Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, -1, 0);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = 0;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	/* Update the VLAN mapping entry and write it to the switch */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	/* VLAN-unaware ports use default PVID 0. */
	gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));

	return 0;
}
/* Add @port to VLAN @vid of the VLAN-aware bridge @bridge.
 *
 * One table slot exists per (bridge, vid) pair, and all entries of a
 * bridge share a single FID. In the mapping entry, val[1] is the port
 * member map and val[2] the egress-tagged map (bit set = send tagged).
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_vlan_add_aware(struct gswip_priv *priv,
				struct net_device *bridge, int port,
				u16 vid, bool untagged,
				bool pvid)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	bool active_vlan_created = false;
	int idx = -1;
	int fid = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			if (fid != -1 && fid != priv->vlans[i].fid)
				dev_err(priv->dev, "one bridge with multiple flow ids\n");
			fid = priv->vlans[i].fid;
			if (priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}
	}

	/* If this bridge is not programmed yet, add a Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = vid;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	/* Ensure the VID field is set on both the create and read paths. */
	vlan_mapping.val[0] = vid;

	/* Update the VLAN mapping entry and write it to the switch */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[2] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	if (untagged)
		vlan_mapping.val[2] &= ~BIT(port);
	else
		vlan_mapping.val[2] |= BIT(port);

	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	if (pvid)
		gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));

	return 0;
}
  920. static int gswip_vlan_remove(struct gswip_priv *priv,
  921. struct net_device *bridge, int port,
  922. u16 vid, bool pvid, bool vlan_aware)
  923. {
  924. struct gswip_pce_table_entry vlan_mapping = {0,};
  925. unsigned int max_ports = priv->hw_info->max_ports;
  926. unsigned int cpu_port = priv->hw_info->cpu_port;
  927. int idx = -1;
  928. int i;
  929. int err;
  930. /* Check if there is already a page for this bridge */
  931. for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
  932. if (priv->vlans[i].bridge == bridge &&
  933. (!vlan_aware || priv->vlans[i].vid == vid)) {
  934. idx = i;
  935. break;
  936. }
  937. }
  938. if (idx == -1) {
  939. dev_err(priv->dev, "bridge to leave does not exists\n");
  940. return -ENOENT;
  941. }
  942. vlan_mapping.index = idx;
  943. vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
  944. err = gswip_pce_table_entry_read(priv, &vlan_mapping);
  945. if (err) {
  946. dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
  947. return err;
  948. }
  949. vlan_mapping.val[1] &= ~BIT(port);
  950. vlan_mapping.val[2] &= ~BIT(port);
  951. err = gswip_pce_table_entry_write(priv, &vlan_mapping);
  952. if (err) {
  953. dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
  954. return err;
  955. }
  956. /* In case all ports are removed from the bridge, remove the VLAN */
  957. if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
  958. err = gswip_vlan_active_remove(priv, idx);
  959. if (err) {
  960. dev_err(priv->dev, "failed to write active VLAN: %d\n",
  961. err);
  962. return err;
  963. }
  964. }
  965. /* GSWIP 2.2 (GRX300) and later program here the VID directly. */
  966. if (pvid)
  967. gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
  968. return 0;
  969. }
/* DSA .port_bridge_join callback.
 *
 * VLAN-unaware bridges get their shared table entry right away;
 * VLAN-aware bridges are programmed per VLAN in .port_vlan_add. In
 * both cases the port leaves its private single-port bridge.
 */
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
				  struct net_device *bridge)
{
	struct gswip_priv *priv = ds->priv;
	int err;

	/* When the bridge uses VLAN filtering we have to configure VLAN
	 * specific bridges. No bridge is configured here.
	 */
	if (!br_vlan_enabled(bridge)) {
		err = gswip_vlan_add_unaware(priv, bridge, port);
		if (err)
			return err;
		priv->port_vlan_filter &= ~BIT(port);
	} else {
		priv->port_vlan_filter |= BIT(port);
	}

	/* Dissolve the port's private single-port bridge. */
	return gswip_add_single_port_br(priv, port, false);
}
/* DSA .port_bridge_leave callback: re-isolate the port and drop its
 * membership in the VLAN-unaware bridge entry, if any.
 */
static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
				    struct net_device *bridge)
{
	struct gswip_priv *priv = ds->priv;

	/* Put the port back into its private single-port bridge. */
	gswip_add_single_port_br(priv, port, true);

	/* When the bridge uses VLAN filtering we have to configure VLAN
	 * specific bridges. No bridge is configured here.
	 */
	if (!br_vlan_enabled(bridge))
		gswip_vlan_remove(priv, bridge, port, 0, true, false);
}
/* DSA .port_vlan_prepare callback: verify that every VID in the range
 * either already owns a table slot for this bridge or that enough free
 * slots remain to create one per missing VID.
 */
static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	unsigned int max_ports = priv->hw_info->max_ports;
	u16 vid;
	int i;
	int pos = max_ports;

	/* We only support VLAN filtering on bridges */
	if (!dsa_is_cpu_port(ds, port) && !bridge)
		return -EOPNOTSUPP;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		int idx = -1;

		/* Check if there is already a page for this VLAN */
		for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
			if (priv->vlans[i].bridge == bridge &&
			    priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}

		/* If this VLAN is not programmed yet, we have to reserve
		 * one entry in the VLAN table. Make sure we start at the
		 * next position round.
		 */
		if (idx == -1) {
			/* Look for a free slot */
			for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
				if (!priv->vlans[pos].bridge) {
					idx = pos;
					/* don't count this slot twice */
					pos++;
					break;
				}
			}

			if (idx == -1)
				return -ENOSPC;
		}
	}

	return 0;
}
/* DSA .port_vlan_add callback: program each VID in the range into the
 * VLAN-aware bridge tables. Errors are logged inside the helper; this
 * callback returns void per the DSA API in this kernel version.
 */
static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;

	/* We have to receive all packets on the CPU port and should not
	 * do any VLAN filtering here. This is also called with bridge
	 * NULL and then we do not know for which bridge to configure
	 * this.
	 */
	if (dsa_is_cpu_port(ds, port))
		return;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
		gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
}
/* DSA .port_vlan_del callback: remove each VID in the range from the
 * port's VLAN-aware bridge tables. Returns the first error, if any.
 */
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;
	int err;

	/* We have to receive all packets on the CPU port and should not
	 * do any VLAN filtering here. This is also called with bridge
	 * NULL and then we do not know for which bridge to configure
	 * this.
	 */
	if (dsa_is_cpu_port(ds, port))
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
		if (err)
			return err;
	}

	return 0;
}
/* DSA .port_fast_age callback: invalidate all dynamically learned MAC
 * bridge table entries belonging to @port. Static entries are kept.
 */
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	int i;
	int err;

	/* Walk the full 2048-entry hardware MAC bridge table. */
	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to read mac bridge: %d\n",
				err);
			return;
		}

		if (!mac_bridge.valid)
			continue;

		/* Static entries are never aged out. */
		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
			continue;

		/* Dynamic entries keep the source port in bits 7:4 of val[0]. */
		if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
			continue;

		mac_bridge.valid = false;

		err = gswip_pce_table_entry_write(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to write mac bridge: %d\n",
				err);
			return;
		}
	}
}
/* DSA .port_stp_state_set callback: map bridge STP states onto the PCE
 * port states. BR_STATE_DISABLED is implemented by stopping the store
 * DMA (no frame reception) rather than via a PCE state.
 */
static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct gswip_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
				  GSWIP_SDMA_PCTRLp(port));
		return;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
		break;
	case BR_STATE_LEARNING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
		break;
	default:
		dev_err(priv->dev, "invalid STP state: %d\n", state);
		return;
	}

	/* Re-enable reception for any non-disabled state. */
	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
			  GSWIP_PCE_PCTRL_0p(port));
}
/* Common helper for .port_fdb_add/.port_fdb_del: write or invalidate a
 * static MAC bridge table entry for @addr in the FID of the bridge
 * @port belongs to. @vid is currently unused.
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_port_fdb(struct dsa_switch *ds, int port,
			  const unsigned char *addr, u16 vid, bool add)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int fid = -1;
	int i;
	int err;

	if (!bridge)
		return -EINVAL;

	/* Look up the FID assigned to this bridge.
	 * NOTE(review): the scan starts at cpu_port although bridge slots
	 * are allocated from max_ports upwards in gswip_vlan_active_create()
	 * — confirm the slots in between can never alias a bridge pointer.
	 */
	for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			fid = priv->vlans[i].fid;
			break;
		}
	}

	if (fid == -1) {
		dev_err(priv->dev, "Port not part of a bridge\n");
		return -EINVAL;
	}

	/* key[0..2] hold the MAC address, key[3] the FID. */
	mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
	mac_bridge.key_mode = true;
	mac_bridge.key[0] = addr[5] | (addr[4] << 8);
	mac_bridge.key[1] = addr[3] | (addr[2] << 8);
	mac_bridge.key[2] = addr[1] | (addr[0] << 8);
	mac_bridge.key[3] = fid;
	mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
	mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
	mac_bridge.valid = add;

	err = gswip_pce_table_entry_write(priv, &mac_bridge);
	if (err)
		dev_err(priv->dev, "failed to write mac bridge: %d\n", err);

	return err;
}
/* DSA .port_fdb_add callback: install a static FDB entry. */
static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid)
{
	return gswip_port_fdb(ds, port, addr, vid, true);
}
/* DSA .port_fdb_del callback: remove a static FDB entry. */
static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid)
{
	return gswip_port_fdb(ds, port, addr, vid, false);
}
  1184. static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
  1185. dsa_fdb_dump_cb_t *cb, void *data)
  1186. {
  1187. struct gswip_priv *priv = ds->priv;
  1188. struct gswip_pce_table_entry mac_bridge = {0,};
  1189. unsigned char addr[6];
  1190. int i;
  1191. int err;
  1192. for (i = 0; i < 2048; i++) {
  1193. mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
  1194. mac_bridge.index = i;
  1195. err = gswip_pce_table_entry_read(priv, &mac_bridge);
  1196. if (err) {
  1197. dev_err(priv->dev, "failed to write mac bridge: %d\n",
  1198. err);
  1199. return err;
  1200. }
  1201. if (!mac_bridge.valid)
  1202. continue;
  1203. addr[5] = mac_bridge.key[0] & 0xff;
  1204. addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
  1205. addr[3] = mac_bridge.key[1] & 0xff;
  1206. addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
  1207. addr[1] = mac_bridge.key[2] & 0xff;
  1208. addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
  1209. if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
  1210. if (mac_bridge.val[0] & BIT(port)) {
  1211. err = cb(addr, 0, true, data);
  1212. if (err)
  1213. return err;
  1214. }
  1215. } else {
  1216. if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
  1217. err = cb(addr, 0, false, data);
  1218. if (err)
  1219. return err;
  1220. }
  1221. }
  1222. }
  1223. return 0;
  1224. }
/* phylink .validate callback: restrict the supported link modes based
 * on the port number and the requested interface mode.
 *
 * Ports 0/1 accept xMII/RGMII modes, ports 2-4 only the internal mode,
 * port 5 RGMII or internal. Anything else is rejected entirely.
 */
static void gswip_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	switch (port) {
	case 0:
	case 1:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_MII &&
		    state->interface != PHY_INTERFACE_MODE_REVMII &&
		    state->interface != PHY_INTERFACE_MODE_RMII)
			goto unsupported;
		break;
	case 2:
	case 3:
	case 4:
		if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	case 5:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	default:
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		dev_err(ds->dev, "Unsupported port: %i\n", port);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII, Reverse MII and Reduced MII, we
	 * support Gigabit, including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_RMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}
	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	return;

unsupported:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
	dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
		phy_modes(state->interface), port);
	return;
}
  1284. static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
  1285. {
  1286. u32 mdio_phy;
  1287. if (link)
  1288. mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
  1289. else
  1290. mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
  1291. gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
  1292. GSWIP_MDIO_PHYp(port));
  1293. }
/* Program the port speed into the MDIO PHY register, the xMII clock
 * rate and the MAC GMII/RGMII mode.
 *
 * NOTE(review): speeds other than 10/100/1000 fall through the switch
 * and write zeros to all three fields — confirm callers only pass
 * these three speeds.
 */
static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
				 phy_interface_t interface)
{
	u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;

	switch (speed) {
	case SPEED_10:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;

		/* RMII always uses the 50 MHz rate regardless of speed. */
		if (interface == PHY_INTERFACE_MODE_RMII)
			mii_cfg = GSWIP_MII_CFG_RATE_M50;
		else
			mii_cfg = GSWIP_MII_CFG_RATE_M2P5;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
		break;
	case SPEED_100:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;

		if (interface == PHY_INTERFACE_MODE_RMII)
			mii_cfg = GSWIP_MII_CFG_RATE_M50;
		else
			mii_cfg = GSWIP_MII_CFG_RATE_M25;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
		break;
	case SPEED_1000:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
		mii_cfg = GSWIP_MII_CFG_RATE_M125;
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
		break;
	}

	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
			GSWIP_MDIO_PHYp(port));
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
			  GSWIP_MAC_CTRL_0p(port));
}
  1327. static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
  1328. {
  1329. u32 mac_ctrl_0, mdio_phy;
  1330. if (duplex == DUPLEX_FULL) {
  1331. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
  1332. mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
  1333. } else {
  1334. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
  1335. mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
  1336. }
  1337. gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
  1338. GSWIP_MAC_CTRL_0p(port));
  1339. gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
  1340. GSWIP_MDIO_PHYp(port));
  1341. }
  1342. static void gswip_port_set_pause(struct gswip_priv *priv, int port,
  1343. bool tx_pause, bool rx_pause)
  1344. {
  1345. u32 mac_ctrl_0, mdio_phy;
  1346. if (tx_pause && rx_pause) {
  1347. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
  1348. mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
  1349. GSWIP_MDIO_PHY_FCONRX_EN;
  1350. } else if (tx_pause) {
  1351. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
  1352. mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
  1353. GSWIP_MDIO_PHY_FCONRX_DIS;
  1354. } else if (rx_pause) {
  1355. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
  1356. mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
  1357. GSWIP_MDIO_PHY_FCONRX_EN;
  1358. } else {
  1359. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
  1360. mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
  1361. GSWIP_MDIO_PHY_FCONRX_DIS;
  1362. }
  1363. gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
  1364. mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
  1365. gswip_mdio_mask(priv,
  1366. GSWIP_MDIO_PHY_FCONTX_MASK |
  1367. GSWIP_MDIO_PHY_FCONRX_MASK,
  1368. mdio_phy, GSWIP_MDIO_PHYp(port));
  1369. }
/* phylink .mac_config callback: select the xMII mode for @port, apply
 * speed/duplex/pause from @state and zero the RGMII delay fields for
 * the internal-delay RGMII variants.
 */
static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     const struct phylink_link_state *state)
{
	struct gswip_priv *priv = ds->priv;
	u32 miicfg = 0;

	miicfg |= GSWIP_MII_CFG_LDCLKDIS;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_INTERNAL:
		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
		break;
	case PHY_INTERFACE_MODE_RMII:
		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;

		/* Configure the RMII clock as output: */
		miicfg |= GSWIP_MII_CFG_RMII_CLK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
		break;
	default:
		dev_err(ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}

	gswip_mii_mask_cfg(priv,
			   GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
			   GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
			   miicfg, port);

	gswip_port_set_speed(priv, port, state->speed, state->interface);
	gswip_port_set_duplex(priv, port, state->duplex);
	gswip_port_set_pause(priv, port, !!(state->pause & MLO_PAUSE_TX),
			     !!(state->pause & MLO_PAUSE_RX));

	/* Zero the TX/RX clock delay fields for internal-delay modes. */
	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
				    GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_RXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
		break;
	default:
		break;
	}
}
/* phylink .mac_link_down callback: disable the xMII interface and, for
 * user ports, force the MDIO link state down.
 */
static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int mode,
					phy_interface_t interface)
{
	struct gswip_priv *priv = ds->priv;

	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);

	if (!dsa_is_cpu_port(ds, port))
		gswip_port_set_link(priv, port, false);
}
/* phylink .mac_link_up callback: force the MDIO link state up for user
 * ports and (re-)enable the xMII interface.
 */
static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      phy_interface_t interface,
				      struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;

	if (!dsa_is_cpu_port(ds, port))
		gswip_port_set_link(priv, port, true);

	gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
  1443. static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
  1444. uint8_t *data)
  1445. {
  1446. int i;
  1447. if (stringset != ETH_SS_STATS)
  1448. return;
  1449. for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
  1450. strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
  1451. ETH_GSTRING_LEN);
  1452. }
/* Read one entry from a buffer-manager (BM) RAM table, e.g. the RMON
 * counters.
 *
 * The access is indirect: write the entry index to GSWIP_BM_RAM_ADDR,
 * kick off the read by setting the table id together with the BAS bit in
 * GSWIP_BM_RAM_CTRL, wait for the hardware to clear BAS again, then read
 * the result from the two GSWIP_BM_RAM_VAL registers (the value is split
 * into a low and a high 16-bit half, see the << 16 below).
 *
 * Returns the 32-bit entry value, or 0 if the hardware did not complete
 * the access before the timeout (an error is logged in that case).
 */
static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
				    u32 index)
{
	u32 result;
	int err;

	gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
	gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
				GSWIP_BM_RAM_CTRL_OPMOD,
			  table | GSWIP_BM_RAM_CTRL_BAS,
			  GSWIP_BM_RAM_CTRL);

	/* Wait until the hardware clears BAS, i.e. the access finished. */
	err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
				     GSWIP_BM_RAM_CTRL_BAS);
	if (err) {
		dev_err(priv->dev, "timeout while reading table: %u, index: %u",
			table, index);
		return 0;
	}

	/* Combine the two 16-bit halves of the result. */
	result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
	result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;

	return result;
}
  1474. static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
  1475. uint64_t *data)
  1476. {
  1477. struct gswip_priv *priv = ds->priv;
  1478. const struct gswip_rmon_cnt_desc *rmon_cnt;
  1479. int i;
  1480. u64 high;
  1481. for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
  1482. rmon_cnt = &gswip_rmon_cnt[i];
  1483. data[i] = gswip_bcm_ram_entry_read(priv, port,
  1484. rmon_cnt->offset);
  1485. if (rmon_cnt->size == 2) {
  1486. high = gswip_bcm_ram_entry_read(priv, port,
  1487. rmon_cnt->offset + 1);
  1488. data[i] |= high << 32;
  1489. }
  1490. }
  1491. }
  1492. static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
  1493. {
  1494. if (sset != ETH_SS_STATS)
  1495. return 0;
  1496. return ARRAY_SIZE(gswip_rmon_cnt);
  1497. }
/* DSA switch operations implemented by this driver. */
static const struct dsa_switch_ops gswip_switch_ops = {
	.get_tag_protocol = gswip_get_tag_protocol,
	.setup = gswip_setup,
	/* port lifecycle */
	.port_enable = gswip_port_enable,
	.port_disable = gswip_port_disable,
	/* bridge offload */
	.port_bridge_join = gswip_port_bridge_join,
	.port_bridge_leave = gswip_port_bridge_leave,
	.port_fast_age = gswip_port_fast_age,
	/* VLAN offload */
	.port_vlan_filtering = gswip_port_vlan_filtering,
	.port_vlan_prepare = gswip_port_vlan_prepare,
	.port_vlan_add = gswip_port_vlan_add,
	.port_vlan_del = gswip_port_vlan_del,
	.port_stp_state_set = gswip_port_stp_state_set,
	/* FDB offload */
	.port_fdb_add = gswip_port_fdb_add,
	.port_fdb_del = gswip_port_fdb_del,
	.port_fdb_dump = gswip_port_fdb_dump,
	/* phylink MAC callbacks */
	.phylink_validate = gswip_phylink_validate,
	.phylink_mac_config = gswip_phylink_mac_config,
	.phylink_mac_link_down = gswip_phylink_mac_link_down,
	.phylink_mac_link_up = gswip_phylink_mac_link_up,
	/* ethtool statistics */
	.get_strings = gswip_get_strings,
	.get_ethtool_stats = gswip_get_ethtool_stats,
	.get_sset_count = gswip_get_sset_count,
};
/* GPHY firmware file names per SoC/revision. Each table pairs the Fast
 * Ethernet (PHY22F) image with the Gigabit Ethernet (PHY11G) image.
 */
static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
};

static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
};

static const struct xway_gphy_match_data xrx300_gphy_data = {
	.fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
	.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
};

/* Match table for the "gphy-fw" sub-node. The generic xrx200 compatible
 * has no data: the firmware is then selected by the GSWIP version in
 * gswip_gphy_fw_list(). xrx330 reuses the xrx300 images.
 */
static const struct of_device_id xway_gphy_match[] = {
	{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
	{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
	{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
	{ .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
	{ .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
	{},
};
  1542. static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
  1543. {
  1544. struct device *dev = priv->dev;
  1545. const struct firmware *fw;
  1546. void *fw_addr;
  1547. dma_addr_t dma_addr;
  1548. dma_addr_t dev_addr;
  1549. size_t size;
  1550. int ret;
  1551. ret = clk_prepare_enable(gphy_fw->clk_gate);
  1552. if (ret)
  1553. return ret;
  1554. reset_control_assert(gphy_fw->reset);
  1555. ret = request_firmware(&fw, gphy_fw->fw_name, dev);
  1556. if (ret) {
  1557. dev_err(dev, "failed to load firmware: %s, error: %i\n",
  1558. gphy_fw->fw_name, ret);
  1559. return ret;
  1560. }
  1561. /* GPHY cores need the firmware code in a persistent and contiguous
  1562. * memory area with a 16 kB boundary aligned start address.
  1563. */
  1564. size = fw->size + XRX200_GPHY_FW_ALIGN;
  1565. fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
  1566. if (fw_addr) {
  1567. fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
  1568. dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
  1569. memcpy(fw_addr, fw->data, fw->size);
  1570. } else {
  1571. dev_err(dev, "failed to alloc firmware memory\n");
  1572. release_firmware(fw);
  1573. return -ENOMEM;
  1574. }
  1575. release_firmware(fw);
  1576. ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
  1577. if (ret)
  1578. return ret;
  1579. reset_control_deassert(gphy_fw->reset);
  1580. return ret;
  1581. }
/* Set up one GPHY core described by the child node @gphy_fw_np.
 *
 * @i: index of the core; used to build the gate clock name ("gphy%d").
 *
 * Looks up the gate clock, reads the RCU register offset from "reg",
 * picks the firmware file based on the optional "lantiq,gphy-mode"
 * property (defaults to Gigabit/GE mode), takes the reset line and then
 * loads the firmware via gswip_gphy_fw_load().
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_gphy_fw_probe(struct gswip_priv *priv,
			       struct gswip_gphy_fw *gphy_fw,
			       struct device_node *gphy_fw_np, int i)
{
	struct device *dev = priv->dev;
	u32 gphy_mode;
	int ret;
	char gphyname[10];

	snprintf(gphyname, sizeof(gphyname), "gphy%d", i);

	gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
	if (IS_ERR(gphy_fw->clk_gate)) {
		dev_err(dev, "Failed to lookup gate clock\n");
		return PTR_ERR(gphy_fw->clk_gate);
	}

	/* Offset of this core's firmware-address register in the RCU block */
	ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
	if (ret)
		return ret;

	ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
	/* Default to GE mode */
	if (ret)
		gphy_mode = GPHY_MODE_GE;

	switch (gphy_mode) {
	case GPHY_MODE_FE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
		break;
	case GPHY_MODE_GE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
		break;
	default:
		dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
		return -EINVAL;
	}

	gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
	if (IS_ERR(gphy_fw->reset)) {
		/* Deferral is expected; only log real lookup failures. */
		if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
			dev_err(dev, "Failed to lookup gphy reset\n");
		return PTR_ERR(gphy_fw->reset);
	}

	return gswip_gphy_fw_load(priv, gphy_fw);
}
  1622. static void gswip_gphy_fw_remove(struct gswip_priv *priv,
  1623. struct gswip_gphy_fw *gphy_fw)
  1624. {
  1625. int ret;
  1626. /* check if the device was fully probed */
  1627. if (!gphy_fw->fw_name)
  1628. return;
  1629. ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
  1630. if (ret)
  1631. dev_err(priv->dev, "can not reset GPHY FW pointer");
  1632. clk_disable_unprepare(gphy_fw->clk_gate);
  1633. reset_control_put(gphy_fw->reset);
  1634. }
  1635. static int gswip_gphy_fw_list(struct gswip_priv *priv,
  1636. struct device_node *gphy_fw_list_np, u32 version)
  1637. {
  1638. struct device *dev = priv->dev;
  1639. struct device_node *gphy_fw_np;
  1640. const struct of_device_id *match;
  1641. int err;
  1642. int i = 0;
  1643. /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
  1644. * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
  1645. * needs a different GPHY firmware.
  1646. */
  1647. if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
  1648. switch (version) {
  1649. case GSWIP_VERSION_2_0:
  1650. priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
  1651. break;
  1652. case GSWIP_VERSION_2_1:
  1653. priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
  1654. break;
  1655. default:
  1656. dev_err(dev, "unknown GSWIP version: 0x%x", version);
  1657. return -ENOENT;
  1658. }
  1659. }
  1660. match = of_match_node(xway_gphy_match, gphy_fw_list_np);
  1661. if (match && match->data)
  1662. priv->gphy_fw_name_cfg = match->data;
  1663. if (!priv->gphy_fw_name_cfg) {
  1664. dev_err(dev, "GPHY compatible type not supported");
  1665. return -ENOENT;
  1666. }
  1667. priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
  1668. if (!priv->num_gphy_fw)
  1669. return -ENOENT;
  1670. priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
  1671. "lantiq,rcu");
  1672. if (IS_ERR(priv->rcu_regmap))
  1673. return PTR_ERR(priv->rcu_regmap);
  1674. priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
  1675. sizeof(*priv->gphy_fw),
  1676. GFP_KERNEL | __GFP_ZERO);
  1677. if (!priv->gphy_fw)
  1678. return -ENOMEM;
  1679. for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
  1680. err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
  1681. gphy_fw_np, i);
  1682. if (err)
  1683. goto remove_gphy;
  1684. i++;
  1685. }
  1686. /* The standalone PHY11G requires 300ms to be fully
  1687. * initialized and ready for any MDIO communication after being
  1688. * taken out of reset. For the SoC-internal GPHY variant there
  1689. * is no (known) documentation for the minimum time after a
  1690. * reset. Use the same value as for the standalone variant as
  1691. * some users have reported internal PHYs not being detected
  1692. * without any delay.
  1693. */
  1694. msleep(300);
  1695. return 0;
  1696. remove_gphy:
  1697. for (i = 0; i < priv->num_gphy_fw; i++)
  1698. gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
  1699. return err;
  1700. }
/* Platform probe: map the three register areas (switch core, MDIO
 * controller, xMII configuration), load the GPHY firmware, register the
 * internal MDIO bus and finally register the DSA switch.
 */
static int gswip_probe(struct platform_device *pdev)
{
	struct gswip_priv *priv;
	struct device_node *mdio_np, *gphy_fw_np;
	struct device *dev = &pdev->dev;
	int err;
	int i;
	u32 version;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->gswip = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->gswip))
		return PTR_ERR(priv->gswip);

	priv->mdio = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->mdio))
		return PTR_ERR(priv->mdio);

	priv->mii = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(priv->mii))
		return PTR_ERR(priv->mii);

	priv->hw_info = of_device_get_match_data(dev);
	if (!priv->hw_info)
		return -EINVAL;

	priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports);
	if (!priv->ds)
		return -ENOMEM;

	priv->ds->priv = priv;
	priv->ds->ops = &gswip_switch_ops;
	priv->dev = dev;
	version = gswip_switch_r(priv, GSWIP_VERSION);

	/* load the GPHY firmware; the image selection depends on the GSWIP
	 * version read above
	 */
	gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
	if (gphy_fw_np) {
		err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
		of_node_put(gphy_fw_np);
		if (err) {
			dev_err(dev, "gphy fw probe failed\n");
			return err;
		}
	}

	/* bring up the mdio bus */
	mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
	if (mdio_np) {
		err = gswip_mdio(priv, mdio_np);
		if (err) {
			dev_err(dev, "mdio probe failed\n");
			goto put_mdio_node;
		}
	}

	err = dsa_register_switch(priv->ds);
	if (err) {
		dev_err(dev, "dsa switch register failed: %i\n", err);
		goto mdio_bus;
	}
	/* Reject device trees that put the CPU port anywhere else than on
	 * the one port the hardware supports for it.
	 */
	if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
		dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
			priv->hw_info->cpu_port);
		err = -EINVAL;
		goto disable_switch;
	}

	platform_set_drvdata(pdev, priv);

	dev_info(dev, "probed GSWIP version %lx mod %lx\n",
		 (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
		 (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
	return 0;

disable_switch:
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
	dsa_unregister_switch(priv->ds);
mdio_bus:
	if (mdio_np)
		mdiobus_unregister(priv->ds->slave_mii_bus);
put_mdio_node:
	of_node_put(mdio_np);
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}
/* Platform remove: undo gswip_probe() in reverse order. */
static int gswip_remove(struct platform_device *pdev)
{
	struct gswip_priv *priv = platform_get_drvdata(pdev);
	int i;

	/* disable the switch */
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);

	dsa_unregister_switch(priv->ds);

	if (priv->ds->slave_mii_bus) {
		mdiobus_unregister(priv->ds->slave_mii_bus);
		/* NOTE(review): drops the of_node reference presumably taken
		 * when the MDIO bus was set up in gswip_mdio() - confirm
		 */
		of_node_put(priv->ds->slave_mii_bus->dev.of_node);
	}

	/* tear down all GPHY cores set up by gswip_gphy_fw_list() */
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);

	return 0;
}
/* xRX200: 7 ports, with port 6 as the only supported CPU port (enforced
 * in gswip_probe()).
 */
static const struct gswip_hw_info gswip_xrx200 = {
	.max_ports = 7,
	.cpu_port = 6,
};

static const struct of_device_id gswip_of_match[] = {
	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
	{},
};
MODULE_DEVICE_TABLE(of, gswip_of_match);
/* Platform driver glue */
static struct platform_driver gswip_driver = {
	.probe = gswip_probe,
	.remove = gswip_remove,
	.driver = {
		.name = "gswip",
		.of_match_table = gswip_of_match,
	},
};

module_platform_driver(gswip_driver);
/* GPHY firmware images this driver may request at runtime */
MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");