netcp_ethss.c

/*
 * Keystone GBE and XGBE subsystem code
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include "cpsw.h"
#include "cpsw_ale.h"
#include "netcp.h"
#include "cpts.h"
#define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION "v1.0"
#define GBE_IDENT(reg) ((reg >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
#define GBE_MINOR_VERSION(reg) (reg & 0xff)
#define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
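/* As implied by the shift/mask macros above, an id_ver word decodes as:
 * identifier in bits 31:16, RTL version in bits 15:11, major version in
 * bits 10:8 and minor version in bits 7:0.
 */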
/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME "netcp-gbe"
#define GBE_SS_VERSION_14 0x4ed2
#define GBE_SS_REG_INDEX 0
#define GBE_SGMII34_REG_INDEX 1
#define GBE_SM_REG_INDEX 2
/* offset relative to base of GBE_SS_REG_INDEX */
#define GBE13_SGMII_MODULE_OFFSET 0x100
/* offset relative to base of GBE_SM_REG_INDEX */
#define GBE13_HOST_PORT_OFFSET 0x34
#define GBE13_SLAVE_PORT_OFFSET 0x60
#define GBE13_EMAC_OFFSET 0x100
#define GBE13_SLAVE_PORT2_OFFSET 0x200
#define GBE13_HW_STATS_OFFSET 0x300
#define GBE13_CPTS_OFFSET 0x500
#define GBE13_ALE_OFFSET 0x600
#define GBE13_HOST_PORT_NUM 0
#define GBE13_NUM_ALE_ENTRIES 1024
/* 1G Ethernet NU SS defines */
#define GBENU_MODULE_NAME "netcp-gbenu"
#define GBE_SS_ID_NU 0x4ee6
#define GBE_SS_ID_2U 0x4ee8
#define IS_SS_ID_MU(d) \
	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
#define IS_SS_ID_NU(d) \
	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
#define IS_SS_ID_VER_14(d) \
	(GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
#define IS_SS_ID_2U(d) \
	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
#define GBENU_SS_REG_INDEX 0
#define GBENU_SM_REG_INDEX 1
#define GBENU_SGMII_MODULE_OFFSET 0x100
#define GBENU_HOST_PORT_OFFSET 0x1000
#define GBENU_SLAVE_PORT_OFFSET 0x2000
#define GBENU_EMAC_OFFSET 0x2330
#define GBENU_HW_STATS_OFFSET 0x1a000
#define GBENU_CPTS_OFFSET 0x1d000
#define GBENU_ALE_OFFSET 0x1e000
#define GBENU_HOST_PORT_NUM 0
#define GBENU_SGMII_MODULE_SIZE 0x100
/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME "netcp-xgbe"
#define XGBE_SS_VERSION_10 0x4ee4
#define XGBE_SS_REG_INDEX 0
#define XGBE_SM_REG_INDEX 1
#define XGBE_SERDES_REG_INDEX 2
/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET 0x100
#define IS_SS_ID_XGBE(d) ((d)->ss_version == XGBE_SS_VERSION_10)
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET 0x34
#define XGBE10_SLAVE_PORT_OFFSET 0x64
#define XGBE10_EMAC_OFFSET 0x400
#define XGBE10_CPTS_OFFSET 0x600
#define XGBE10_ALE_OFFSET 0x700
#define XGBE10_HW_STATS_OFFSET 0x800
#define XGBE10_HOST_PORT_NUM 0
#define XGBE10_NUM_ALE_ENTRIES 2048
#define GBE_TIMER_INTERVAL (HZ / 2)
/* Soft reset register values */
#define SOFT_RESET_MASK BIT(0)
#define SOFT_RESET BIT(0)
#define DEVICE_EMACSL_RESET_POLL_COUNT 100
#define GMACSL_RET_WARN_RESET_INCOMPLETE -2
#define MACSL_RX_ENABLE_CSF BIT(23)
#define MACSL_ENABLE_EXT_CTL BIT(18)
#define MACSL_XGMII_ENABLE BIT(13)
#define MACSL_XGIG_MODE BIT(8)
#define MACSL_GIG_MODE BIT(7)
#define MACSL_GMII_ENABLE BIT(5)
#define MACSL_FULLDUPLEX BIT(0)
#define GBE_CTL_P0_ENABLE BIT(2)
#define ETH_SW_CTL_P0_TX_CRC_REMOVE BIT(13)
#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
#define GBE_STATS_CD_SEL BIT(28)
#define GBE_PORT_MASK(x) (BIT(x) - 1)
#define GBE_MASK_NO_PORTS 0
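/* GBE_PORT_MASK(x) sets the x low-order bits, one per port;
 * e.g. GBE_PORT_MASK(4) evaluates to 0xf.
 */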
#define GBE_DEF_1G_MAC_CONTROL \
	(MACSL_GIG_MODE | MACSL_GMII_ENABLE | \
	 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
#define GBE_DEF_10G_MAC_CONTROL \
	(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \
	 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
#define GBE_STATSA_MODULE 0
#define GBE_STATSB_MODULE 1
#define GBE_STATSC_MODULE 2
#define GBE_STATSD_MODULE 3
#define GBENU_STATS0_MODULE 0
#define GBENU_STATS1_MODULE 1
#define GBENU_STATS2_MODULE 2
#define GBENU_STATS3_MODULE 3
#define GBENU_STATS4_MODULE 4
#define GBENU_STATS5_MODULE 5
#define GBENU_STATS6_MODULE 6
#define GBENU_STATS7_MODULE 7
#define GBENU_STATS8_MODULE 8
#define XGBE_STATS0_MODULE 0
#define XGBE_STATS1_MODULE 1
#define XGBE_STATS2_MODULE 2
/* s: 0-based slave_port */
#define SGMII_BASE(d, s) \
	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
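/* Per the macro above, slaves 0 and 1 resolve to sgmii_port_regs while
 * slaves 2 and up resolve to the separate sgmii_port34_regs block.
 */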
#define GBE_TX_QUEUE 648
#define GBE_TXHOOK_ORDER 0
#define GBE_RXHOOK_ORDER 0
#define GBE_DEFAULT_ALE_AGEOUT 30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define SLAVE_LINK_IS_RGMII(s) \
	(((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
	 ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
#define SLAVE_LINK_IS_SGMII(s) \
	((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
#define NETCP_LINK_STATE_INVALID -1
#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
	offsetof(struct gbe##_##rb, rn)
#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
	offsetof(struct gbenu##_##rb, rn)
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
	offsetof(struct xgbe##_##rb, rn)
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
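/* Illustrative expansion (not in the original source): given a
 * struct gbe_priv *gbe_dev,
 *	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset)
 * stores offsetof(struct gbe_switch_regs, soft_reset) in
 * gbe_dev->switch_regs_ofs.soft_reset, and
 *	GBE_REG_ADDR(gbe_dev, switch_regs, soft_reset)
 * then yields gbe_dev->switch_regs plus that offset, i.e. the mapped
 * address of the soft_reset register.
 */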
#define HOST_TX_PRI_MAP_DEFAULT 0x00000000
#if IS_ENABLED(CONFIG_TI_CPTS)
/* Px_TS_CTL register fields */
#define TS_RX_ANX_F_EN BIT(0)
#define TS_RX_VLAN_LT1_EN BIT(1)
#define TS_RX_VLAN_LT2_EN BIT(2)
#define TS_RX_ANX_D_EN BIT(3)
#define TS_TX_ANX_F_EN BIT(4)
#define TS_TX_VLAN_LT1_EN BIT(5)
#define TS_TX_VLAN_LT2_EN BIT(6)
#define TS_TX_ANX_D_EN BIT(7)
#define TS_LT2_EN BIT(8)
#define TS_RX_ANX_E_EN BIT(9)
#define TS_TX_ANX_E_EN BIT(10)
#define TS_MSG_TYPE_EN_SHIFT 16
#define TS_MSG_TYPE_EN_MASK 0xffff
/* Px_TS_SEQ_LTYPE register fields */
#define TS_SEQ_ID_OFS_SHIFT 16
#define TS_SEQ_ID_OFS_MASK 0x3f
/* Px_TS_CTL_LTYPE2 register fields */
#define TS_107 BIT(16)
#define TS_129 BIT(17)
#define TS_130 BIT(18)
#define TS_131 BIT(19)
#define TS_132 BIT(20)
#define TS_319 BIT(21)
#define TS_320 BIT(22)
#define TS_TTL_NONZERO BIT(23)
#define TS_UNI_EN BIT(24)
#define TS_UNI_EN_SHIFT 24
#define TS_TX_ANX_ALL_EN \
	(TS_TX_ANX_D_EN | TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
#define TS_RX_ANX_ALL_EN \
	(TS_RX_ANX_D_EN | TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
#define TS_CTL_DST_PORT TS_319
#define TS_CTL_DST_PORT_SHIFT 21
#define TS_CTL_MADDR_ALL \
	(TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
#define TS_CTL_MADDR_SHIFT 16
/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
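/* Bits 0-3 match PTP messageType values 0-3 (Sync, Delay_Req,
 * Pdelay_Req, Pdelay_Resp); presumably this mask is shifted by
 * TS_MSG_TYPE_EN_SHIFT when programming the Px_TS_CTL register.
 */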
#endif /* CONFIG_TI_CPTS */
struct xgbe_ss_regs {
	u32 id_ver;
	u32 synce_count;
	u32 synce_mux;
	u32 control;
};
struct xgbe_switch_regs {
	u32 id_ver;
	u32 control;
	u32 emcontrol;
	u32 stat_port_en;
	u32 ptype;
	u32 soft_idle;
	u32 thru_rate;
	u32 gap_thresh;
	u32 tx_start_wds;
	u32 flow_control;
	u32 cppi_thresh;
};
struct xgbe_port_regs {
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
	u32 control;
};
struct xgbe_host_port_regs {
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 src_id;
	u32 rx_pri_map;
	u32 rx_maxlen;
};
struct xgbe_emac_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 em_control;
	u32 __reserved_1;
	u32 tx_gap;
	u32 rsvd[4];
};
struct xgbe_host_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 __rsvd_0[3];
	u32 rx_oversized_frames;
	u32 __rsvd_1;
	u32 rx_undersized_frames;
	u32 __rsvd_2;
	u32 overrun_type4;
	u32 overrun_type5;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 __rsvd_3[9];
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};
struct xgbe_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_crc_errors;
	u32 rx_align_code_errors;
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;
	u32 rx_undersized_frames;
	u32 rx_fragments;
	u32 overrun_type4;
	u32 overrun_type5;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_deferred_frames;
	u32 tx_collision_frames;
	u32 tx_single_coll_frames;
	u32 tx_mult_coll_frames;
	u32 tx_excessive_collisions;
	u32 tx_late_collisions;
	u32 tx_underrun;
	u32 tx_carrier_sense_errors;
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};
struct gbenu_ss_regs {
	u32 id_ver;
	u32 synce_count; /* NU */
	u32 synce_mux; /* NU */
	u32 control; /* 2U */
	u32 __rsvd_0[2]; /* 2U */
	u32 rgmii_status; /* 2U */
	u32 ss_status; /* 2U */
};
struct gbenu_switch_regs {
	u32 id_ver;
	u32 control;
	u32 __rsvd_0[2];
	u32 emcontrol;
	u32 stat_port_en;
	u32 ptype; /* NU */
	u32 soft_idle;
	u32 thru_rate; /* NU */
	u32 gap_thresh; /* NU */
	u32 tx_start_wds; /* NU */
	u32 eee_prescale; /* 2U */
	u32 tx_g_oflow_thresh_set; /* NU */
	u32 tx_g_oflow_thresh_clr; /* NU */
	u32 tx_g_buf_thresh_set_l; /* NU */
	u32 tx_g_buf_thresh_set_h; /* NU */
	u32 tx_g_buf_thresh_clr_l; /* NU */
	u32 tx_g_buf_thresh_clr_h; /* NU */
};
struct gbenu_port_regs {
	u32 __rsvd_0;
	u32 control;
	u32 max_blks; /* 2U */
	u32 mem_align1;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map; /* NU */
	u32 pri_ctl; /* 2U */
	u32 rx_pri_map;
	u32 rx_maxlen;
	u32 tx_blks_pri; /* NU */
	u32 __rsvd_1;
	u32 idle2lpi; /* 2U */
	u32 lpi2idle; /* 2U */
	u32 eee_status; /* 2U */
	u32 __rsvd_2;
	u32 __rsvd_3[176]; /* NU: more to add */
	u32 __rsvd_4[2];
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
};
struct gbenu_host_port_regs {
	u32 __rsvd_0;
	u32 control;
	u32 flow_id_offset; /* 2U */
	u32 __rsvd_1;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map; /* NU */
	u32 pri_ctl;
	u32 rx_pri_map;
	u32 rx_maxlen;
	u32 tx_blks_pri; /* NU */
	u32 __rsvd_2;
	u32 idle2lpi; /* 2U */
	u32 lpi2wake; /* 2U */
	u32 eee_status; /* 2U */
	u32 __rsvd_3;
	u32 __rsvd_4[184]; /* NU */
	u32 host_blks_pri; /* NU */
};
struct gbenu_emac_regs {
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 boff_test;
	u32 rx_pause;
	u32 __rsvd_0[11]; /* NU */
	u32 tx_pause;
	u32 __rsvd_1[11]; /* NU */
	u32 em_control;
	u32 tx_gap;
};
/* Some hw stat regs are applicable to slave port only.
 * This is handled by gbenu_et_stats struct. Also some
 * are for SS version NU and some are for 2U.
 */
struct gbenu_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames; /* slave */
	u32 rx_crc_errors;
	u32 rx_align_code_errors; /* slave */
	u32 rx_oversized_frames;
	u32 rx_jabber_frames; /* slave */
	u32 rx_undersized_frames;
	u32 rx_fragments; /* slave */
	u32 ale_drop;
	u32 ale_overrun_drop;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames; /* slave */
	u32 tx_deferred_frames; /* slave */
	u32 tx_collision_frames; /* slave */
	u32 tx_single_coll_frames; /* slave */
	u32 tx_mult_coll_frames; /* slave */
	u32 tx_excessive_collisions; /* slave */
	u32 tx_late_collisions; /* slave */
	u32 rx_ipg_error; /* slave 10G only */
	u32 tx_carrier_sense_errors; /* slave */
	u32 tx_bytes;
	u32 tx_64B_frames;
	u32 tx_65_to_127B_frames;
	u32 tx_128_to_255B_frames;
	u32 tx_256_to_511B_frames;
	u32 tx_512_to_1023B_frames;
	u32 tx_1024B_frames;
	u32 net_bytes;
	u32 rx_bottom_fifo_drop;
	u32 rx_port_mask_drop;
	u32 rx_top_fifo_drop;
	u32 ale_rate_limit_drop;
	u32 ale_vid_ingress_drop;
	u32 ale_da_eq_sa_drop;
	u32 __rsvd_0[3];
	u32 ale_unknown_ucast;
	u32 ale_unknown_ucast_bytes;
	u32 ale_unknown_mcast;
	u32 ale_unknown_mcast_bytes;
	u32 ale_unknown_bcast;
	u32 ale_unknown_bcast_bytes;
	u32 ale_pol_match;
	u32 ale_pol_match_red; /* NU */
	u32 ale_pol_match_yellow; /* NU */
	u32 __rsvd_1[44];
	u32 tx_mem_protect_err;
	/* following NU only */
	u32 tx_pri0;
	u32 tx_pri1;
	u32 tx_pri2;
	u32 tx_pri3;
	u32 tx_pri4;
	u32 tx_pri5;
	u32 tx_pri6;
	u32 tx_pri7;
	u32 tx_pri0_bcnt;
	u32 tx_pri1_bcnt;
	u32 tx_pri2_bcnt;
	u32 tx_pri3_bcnt;
	u32 tx_pri4_bcnt;
	u32 tx_pri5_bcnt;
	u32 tx_pri6_bcnt;
	u32 tx_pri7_bcnt;
	u32 tx_pri0_drop;
	u32 tx_pri1_drop;
	u32 tx_pri2_drop;
	u32 tx_pri3_drop;
	u32 tx_pri4_drop;
	u32 tx_pri5_drop;
	u32 tx_pri6_drop;
	u32 tx_pri7_drop;
	u32 tx_pri0_drop_bcnt;
	u32 tx_pri1_drop_bcnt;
	u32 tx_pri2_drop_bcnt;
	u32 tx_pri3_drop_bcnt;
	u32 tx_pri4_drop_bcnt;
	u32 tx_pri5_drop_bcnt;
	u32 tx_pri6_drop_bcnt;
	u32 tx_pri7_drop_bcnt;
};
#define GBENU_HW_STATS_REG_MAP_SZ 0x200
struct gbe_ss_regs {
	u32 id_ver;
	u32 synce_count;
	u32 synce_mux;
};
struct gbe_ss_regs_ofs {
	u16 id_ver;
	u16 control;
	u16 rgmii_status; /* 2U */
};
struct gbe_switch_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
	u32 soft_idle;
	u32 thru_rate;
	u32 gap_thresh;
	u32 tx_start_wds;
	u32 flow_control;
};
struct gbe_switch_regs_ofs {
	u16 id_ver;
	u16 control;
	u16 soft_reset;
	u16 emcontrol;
	u16 stat_port_en;
	u16 ptype;
	u16 flow_control;
};
struct gbe_port_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
};
struct gbe_port_regs_ofs {
	u16 port_vlan;
	u16 tx_pri_map;
	u16 rx_pri_map;
	u16 sa_lo;
	u16 sa_hi;
	u16 ts_ctl;
	u16 ts_seq_ltype;
	u16 ts_vlan;
	u16 ts_ctl_ltype2;
	u16 ts_ctl2;
	u16 rx_maxlen; /* 2U, NU */
};
struct gbe_host_port_regs {
	u32 src_id;
	u32 port_vlan;
	u32 rx_pri_map;
	u32 rx_maxlen;
};
struct gbe_host_port_regs_ofs {
	u16 port_vlan;
	u16 tx_pri_map;
	u16 rx_maxlen;
};
struct gbe_emac_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
	u32 rsvd[6];
};
struct gbe_emac_regs_ofs {
	u16 mac_control;
	u16 soft_reset;
	u16 rx_maxlen;
};
struct gbe_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_crc_errors;
	u32 rx_align_code_errors;
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;
	u32 rx_undersized_frames;
	u32 rx_fragments;
	u32 __pad_0[2];
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_deferred_frames;
	u32 tx_collision_frames;
	u32 tx_single_coll_frames;
	u32 tx_mult_coll_frames;
	u32 tx_excessive_collisions;
	u32 tx_late_collisions;
	u32 tx_underrun;
	u32 tx_carrier_sense_errors;
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};
#define GBE_MAX_HW_STAT_MODS 9
#define GBE_HW_STATS_REG_MAP_SZ 0x100
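/* hw_stats_regs[] in struct gbe_priv below holds up to GBE_MAX_HW_STAT_MODS
 * per-module stats register blocks; GBE_HW_STATS_REG_MAP_SZ is presumably
 * the span of each 1G stats block (GBENU_HW_STATS_REG_MAP_SZ above being
 * the GBENU equivalent).
 */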
struct ts_ctl {
	int uni;
	u8 dst_port_map;
	u8 maddr_map;
	u8 ts_mcast_type;
};
struct gbe_slave {
	void __iomem *port_regs;
	void __iomem *emac_regs;
	struct gbe_port_regs_ofs port_regs_ofs;
	struct gbe_emac_regs_ofs emac_regs_ofs;
	int slave_num; /* 0 based logical number */
	int port_num; /* actual port number */
	atomic_t link_state;
	bool open;
	struct phy_device *phy;
	u32 link_interface;
	u32 mac_control;
	u8 phy_port_t;
	struct device_node *node;
	struct device_node *phy_node;
	struct ts_ctl ts_ctl;
	struct list_head slave_list;
};
struct gbe_priv {
	struct device *dev;
	struct netcp_device *netcp_device;
	struct timer_list timer;
	u32 num_slaves;
	u32 ale_entries;
	u32 ale_ports;
	bool enable_ale;
	u8 max_num_slaves;
	u8 max_num_ports; /* max_num_slaves + 1 */
	u8 num_stats_mods;
	struct netcp_tx_pipe tx_pipe;
	int host_port;
	u32 rx_packet_max;
	u32 ss_version;
	u32 stats_en_mask;
	void __iomem *ss_regs;
	void __iomem *switch_regs;
	void __iomem *host_port_regs;
	void __iomem *ale_reg;
	void __iomem *cpts_reg;
	void __iomem *sgmii_port_regs;
	void __iomem *sgmii_port34_regs;
	void __iomem *xgbe_serdes_regs;
	void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
	struct gbe_ss_regs_ofs ss_regs_ofs;
	struct gbe_switch_regs_ofs switch_regs_ofs;
	struct gbe_host_port_regs_ofs host_port_regs_ofs;
	struct cpsw_ale *ale;
	unsigned int tx_queue_id;
	const char *dma_chan_name;
	struct list_head gbe_intf_head;
	struct list_head secondary_slaves;
	struct net_device *dummy_ndev;
	u64 *hw_stats;
	u32 *hw_stats_prev;
	const struct netcp_ethtool_stat *et_stats;
	int num_et_stats;
	/* Lock for updating the hwstats */
	spinlock_t hw_stats_lock;
	int cpts_registered;
	struct cpts *cpts;
};
struct gbe_intf {
	struct net_device *ndev;
	struct device *dev;
	struct gbe_priv *gbe_dev;
	struct netcp_tx_pipe tx_pipe;
	struct gbe_slave *slave;
	struct list_head gbe_intf_list;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};
static struct netcp_module gbe_module;
static struct netcp_module xgbe_module;
/* Statistic management */
struct netcp_ethtool_stat {
	char desc[ETH_GSTRING_LEN];
	int type;
	u32 size;
	int offset;
};
#define GBE_STATSA_INFO(field) \
{ \
	"GBE_A:"#field, GBE_STATSA_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}
#define GBE_STATSB_INFO(field) \
{ \
	"GBE_B:"#field, GBE_STATSB_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}
#define GBE_STATSC_INFO(field) \
{ \
	"GBE_C:"#field, GBE_STATSC_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}
#define GBE_STATSD_INFO(field) \
{ \
	"GBE_D:"#field, GBE_STATSD_MODULE, \
	FIELD_SIZEOF(struct gbe_hw_stats, field), \
	offsetof(struct gbe_hw_stats, field) \
}
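/* Each table entry below records an ethtool label ("<module>:<field>"),
 * the hw stats module index, the counter's size (FIELD_SIZEOF) and its
 * byte offset (offsetof) within the stats layout, presumably so the
 * stats code can walk the mapped counter blocks generically.
 */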
static const struct netcp_ethtool_stat gbe13_et_stats[] = {
	/* GBE module A */
	GBE_STATSA_INFO(rx_good_frames),
	GBE_STATSA_INFO(rx_broadcast_frames),
	GBE_STATSA_INFO(rx_multicast_frames),
	GBE_STATSA_INFO(rx_pause_frames),
	GBE_STATSA_INFO(rx_crc_errors),
	GBE_STATSA_INFO(rx_align_code_errors),
	GBE_STATSA_INFO(rx_oversized_frames),
	GBE_STATSA_INFO(rx_jabber_frames),
	GBE_STATSA_INFO(rx_undersized_frames),
	GBE_STATSA_INFO(rx_fragments),
	GBE_STATSA_INFO(rx_bytes),
	GBE_STATSA_INFO(tx_good_frames),
	GBE_STATSA_INFO(tx_broadcast_frames),
	GBE_STATSA_INFO(tx_multicast_frames),
	GBE_STATSA_INFO(tx_pause_frames),
	GBE_STATSA_INFO(tx_deferred_frames),
	GBE_STATSA_INFO(tx_collision_frames),
	GBE_STATSA_INFO(tx_single_coll_frames),
	GBE_STATSA_INFO(tx_mult_coll_frames),
	GBE_STATSA_INFO(tx_excessive_collisions),
	GBE_STATSA_INFO(tx_late_collisions),
	GBE_STATSA_INFO(tx_underrun),
	GBE_STATSA_INFO(tx_carrier_sense_errors),
	GBE_STATSA_INFO(tx_bytes),
	GBE_STATSA_INFO(tx_64byte_frames),
	GBE_STATSA_INFO(tx_65_to_127byte_frames),
	GBE_STATSA_INFO(tx_128_to_255byte_frames),
	GBE_STATSA_INFO(tx_256_to_511byte_frames),
	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
	GBE_STATSA_INFO(tx_1024byte_frames),
	GBE_STATSA_INFO(net_bytes),
	GBE_STATSA_INFO(rx_sof_overruns),
	GBE_STATSA_INFO(rx_mof_overruns),
	GBE_STATSA_INFO(rx_dma_overruns),
	/* GBE module B */
	GBE_STATSB_INFO(rx_good_frames),
	GBE_STATSB_INFO(rx_broadcast_frames),
	GBE_STATSB_INFO(rx_multicast_frames),
	GBE_STATSB_INFO(rx_pause_frames),
	GBE_STATSB_INFO(rx_crc_errors),
	GBE_STATSB_INFO(rx_align_code_errors),
	GBE_STATSB_INFO(rx_oversized_frames),
	GBE_STATSB_INFO(rx_jabber_frames),
	GBE_STATSB_INFO(rx_undersized_frames),
	GBE_STATSB_INFO(rx_fragments),
	GBE_STATSB_INFO(rx_bytes),
	GBE_STATSB_INFO(tx_good_frames),
	GBE_STATSB_INFO(tx_broadcast_frames),
	GBE_STATSB_INFO(tx_multicast_frames),
	GBE_STATSB_INFO(tx_pause_frames),
	GBE_STATSB_INFO(tx_deferred_frames),
	GBE_STATSB_INFO(tx_collision_frames),
	GBE_STATSB_INFO(tx_single_coll_frames),
	GBE_STATSB_INFO(tx_mult_coll_frames),
	GBE_STATSB_INFO(tx_excessive_collisions),
	GBE_STATSB_INFO(tx_late_collisions),
	GBE_STATSB_INFO(tx_underrun),
	GBE_STATSB_INFO(tx_carrier_sense_errors),
	GBE_STATSB_INFO(tx_bytes),
	GBE_STATSB_INFO(tx_64byte_frames),
	GBE_STATSB_INFO(tx_65_to_127byte_frames),
	GBE_STATSB_INFO(tx_128_to_255byte_frames),
	GBE_STATSB_INFO(tx_256_to_511byte_frames),
	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
	GBE_STATSB_INFO(tx_1024byte_frames),
	GBE_STATSB_INFO(net_bytes),
	GBE_STATSB_INFO(rx_sof_overruns),
	GBE_STATSB_INFO(rx_mof_overruns),
	GBE_STATSB_INFO(rx_dma_overruns),
	/* GBE module C */
	GBE_STATSC_INFO(rx_good_frames),
	GBE_STATSC_INFO(rx_broadcast_frames),
	GBE_STATSC_INFO(rx_multicast_frames),
	GBE_STATSC_INFO(rx_pause_frames),
	GBE_STATSC_INFO(rx_crc_errors),
	GBE_STATSC_INFO(rx_align_code_errors),
	GBE_STATSC_INFO(rx_oversized_frames),
	GBE_STATSC_INFO(rx_jabber_frames),
	GBE_STATSC_INFO(rx_undersized_frames),
	GBE_STATSC_INFO(rx_fragments),
	GBE_STATSC_INFO(rx_bytes),
	GBE_STATSC_INFO(tx_good_frames),
	GBE_STATSC_INFO(tx_broadcast_frames),
	GBE_STATSC_INFO(tx_multicast_frames),
	GBE_STATSC_INFO(tx_pause_frames),
	GBE_STATSC_INFO(tx_deferred_frames),
	GBE_STATSC_INFO(tx_collision_frames),
	GBE_STATSC_INFO(tx_single_coll_frames),
	GBE_STATSC_INFO(tx_mult_coll_frames),
	GBE_STATSC_INFO(tx_excessive_collisions),
	GBE_STATSC_INFO(tx_late_collisions),
	GBE_STATSC_INFO(tx_underrun),
	GBE_STATSC_INFO(tx_carrier_sense_errors),
	GBE_STATSC_INFO(tx_bytes),
	GBE_STATSC_INFO(tx_64byte_frames),
	GBE_STATSC_INFO(tx_65_to_127byte_frames),
	GBE_STATSC_INFO(tx_128_to_255byte_frames),
	GBE_STATSC_INFO(tx_256_to_511byte_frames),
	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
	GBE_STATSC_INFO(tx_1024byte_frames),
	GBE_STATSC_INFO(net_bytes),
	GBE_STATSC_INFO(rx_sof_overruns),
	GBE_STATSC_INFO(rx_mof_overruns),
	GBE_STATSC_INFO(rx_dma_overruns),
	/* GBE module D */
	GBE_STATSD_INFO(rx_good_frames),
	GBE_STATSD_INFO(rx_broadcast_frames),
	GBE_STATSD_INFO(rx_multicast_frames),
	GBE_STATSD_INFO(rx_pause_frames),
	GBE_STATSD_INFO(rx_crc_errors),
	GBE_STATSD_INFO(rx_align_code_errors),
	GBE_STATSD_INFO(rx_oversized_frames),
	GBE_STATSD_INFO(rx_jabber_frames),
	GBE_STATSD_INFO(rx_undersized_frames),
	GBE_STATSD_INFO(rx_fragments),
	GBE_STATSD_INFO(rx_bytes),
	GBE_STATSD_INFO(tx_good_frames),
	GBE_STATSD_INFO(tx_broadcast_frames),
	GBE_STATSD_INFO(tx_multicast_frames),
	GBE_STATSD_INFO(tx_pause_frames),
	GBE_STATSD_INFO(tx_deferred_frames),
	GBE_STATSD_INFO(tx_collision_frames),
	GBE_STATSD_INFO(tx_single_coll_frames),
	GBE_STATSD_INFO(tx_mult_coll_frames),
	GBE_STATSD_INFO(tx_excessive_collisions),
	GBE_STATSD_INFO(tx_late_collisions),
	GBE_STATSD_INFO(tx_underrun),
	GBE_STATSD_INFO(tx_carrier_sense_errors),
	GBE_STATSD_INFO(tx_bytes),
	GBE_STATSD_INFO(tx_64byte_frames),
	GBE_STATSD_INFO(tx_65_to_127byte_frames),
	GBE_STATSD_INFO(tx_128_to_255byte_frames),
	GBE_STATSD_INFO(tx_256_to_511byte_frames),
	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
	GBE_STATSD_INFO(tx_1024byte_frames),
	GBE_STATSD_INFO(net_bytes),
	GBE_STATSD_INFO(rx_sof_overruns),
	GBE_STATSD_INFO(rx_mof_overruns),
	GBE_STATSD_INFO(rx_dma_overruns),
};
/* Number of GBENU_STATS_HOST() entries in gbenu_et_stats below */
#define GBENU_ET_STATS_HOST_SIZE 52
#define GBENU_STATS_HOST(field) \
{ \
	"GBE_HOST:"#field, GBENU_STATS0_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
/* Number of per-port GBENU_STATS_Px() entries in gbenu_et_stats below */
#define GBENU_ET_STATS_PORT_SIZE 65
#define GBENU_STATS_P1(field) \
{ \
	"GBE_P1:"#field, GBENU_STATS1_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P2(field) \
{ \
	"GBE_P2:"#field, GBENU_STATS2_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P3(field) \
{ \
	"GBE_P3:"#field, GBENU_STATS3_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P4(field) \
{ \
	"GBE_P4:"#field, GBENU_STATS4_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P5(field) \
{ \
	"GBE_P5:"#field, GBENU_STATS5_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P6(field) \
{ \
	"GBE_P6:"#field, GBENU_STATS6_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P7(field) \
{ \
	"GBE_P7:"#field, GBENU_STATS7_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
#define GBENU_STATS_P8(field) \
{ \
	"GBE_P8:"#field, GBENU_STATS8_MODULE, \
	FIELD_SIZEOF(struct gbenu_hw_stats, field), \
	offsetof(struct gbenu_hw_stats, field) \
}
static const struct netcp_ethtool_stat gbenu_et_stats[] = {
	/* GBENU Host Module */
	GBENU_STATS_HOST(rx_good_frames),
	GBENU_STATS_HOST(rx_broadcast_frames),
	GBENU_STATS_HOST(rx_multicast_frames),
	GBENU_STATS_HOST(rx_crc_errors),
	GBENU_STATS_HOST(rx_oversized_frames),
	GBENU_STATS_HOST(rx_undersized_frames),
	GBENU_STATS_HOST(ale_drop),
	GBENU_STATS_HOST(ale_overrun_drop),
	GBENU_STATS_HOST(rx_bytes),
	GBENU_STATS_HOST(tx_good_frames),
	GBENU_STATS_HOST(tx_broadcast_frames),
	GBENU_STATS_HOST(tx_multicast_frames),
	GBENU_STATS_HOST(tx_bytes),
	GBENU_STATS_HOST(tx_64B_frames),
	GBENU_STATS_HOST(tx_65_to_127B_frames),
	GBENU_STATS_HOST(tx_128_to_255B_frames),
	GBENU_STATS_HOST(tx_256_to_511B_frames),
	GBENU_STATS_HOST(tx_512_to_1023B_frames),
	GBENU_STATS_HOST(tx_1024B_frames),
	GBENU_STATS_HOST(net_bytes),
	GBENU_STATS_HOST(rx_bottom_fifo_drop),
	GBENU_STATS_HOST(rx_port_mask_drop),
	GBENU_STATS_HOST(rx_top_fifo_drop),
	GBENU_STATS_HOST(ale_rate_limit_drop),
	GBENU_STATS_HOST(ale_vid_ingress_drop),
	GBENU_STATS_HOST(ale_da_eq_sa_drop),
	GBENU_STATS_HOST(ale_unknown_ucast),
	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
	GBENU_STATS_HOST(ale_unknown_mcast),
	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
	GBENU_STATS_HOST(ale_unknown_bcast),
	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
	GBENU_STATS_HOST(ale_pol_match),
	GBENU_STATS_HOST(ale_pol_match_red),
	GBENU_STATS_HOST(ale_pol_match_yellow),
	GBENU_STATS_HOST(tx_mem_protect_err),
	GBENU_STATS_HOST(tx_pri0_drop),
	GBENU_STATS_HOST(tx_pri1_drop),
	GBENU_STATS_HOST(tx_pri2_drop),
	GBENU_STATS_HOST(tx_pri3_drop),
	GBENU_STATS_HOST(tx_pri4_drop),
	GBENU_STATS_HOST(tx_pri5_drop),
	GBENU_STATS_HOST(tx_pri6_drop),
	GBENU_STATS_HOST(tx_pri7_drop),
	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
	/* GBENU Module 1 */
	GBENU_STATS_P1(rx_good_frames),
	GBENU_STATS_P1(rx_broadcast_frames),
	GBENU_STATS_P1(rx_multicast_frames),
	GBENU_STATS_P1(rx_pause_frames),
	GBENU_STATS_P1(rx_crc_errors),
	GBENU_STATS_P1(rx_align_code_errors),
	GBENU_STATS_P1(rx_oversized_frames),
	GBENU_STATS_P1(rx_jabber_frames),
	GBENU_STATS_P1(rx_undersized_frames),
	GBENU_STATS_P1(rx_fragments),
	GBENU_STATS_P1(ale_drop),
	GBENU_STATS_P1(ale_overrun_drop),
	GBENU_STATS_P1(rx_bytes),
	GBENU_STATS_P1(tx_good_frames),
	GBENU_STATS_P1(tx_broadcast_frames),
	GBENU_STATS_P1(tx_multicast_frames),
	GBENU_STATS_P1(tx_pause_frames),
	GBENU_STATS_P1(tx_deferred_frames),
	GBENU_STATS_P1(tx_collision_frames),
	GBENU_STATS_P1(tx_single_coll_frames),
	GBENU_STATS_P1(tx_mult_coll_frames),
	GBENU_STATS_P1(tx_excessive_collisions),
	GBENU_STATS_P1(tx_late_collisions),
	GBENU_STATS_P1(rx_ipg_error),
	GBENU_STATS_P1(tx_carrier_sense_errors),
	GBENU_STATS_P1(tx_bytes),
	GBENU_STATS_P1(tx_64B_frames),
	GBENU_STATS_P1(tx_65_to_127B_frames),
	GBENU_STATS_P1(tx_128_to_255B_frames),
	GBENU_STATS_P1(tx_256_to_511B_frames),
	GBENU_STATS_P1(tx_512_to_1023B_frames),
	GBENU_STATS_P1(tx_1024B_frames),
	GBENU_STATS_P1(net_bytes),
	GBENU_STATS_P1(rx_bottom_fifo_drop),
	GBENU_STATS_P1(rx_port_mask_drop),
	GBENU_STATS_P1(rx_top_fifo_drop),
	GBENU_STATS_P1(ale_rate_limit_drop),
	GBENU_STATS_P1(ale_vid_ingress_drop),
	GBENU_STATS_P1(ale_da_eq_sa_drop),
	GBENU_STATS_P1(ale_unknown_ucast),
	GBENU_STATS_P1(ale_unknown_ucast_bytes),
	GBENU_STATS_P1(ale_unknown_mcast),
	GBENU_STATS_P1(ale_unknown_mcast_bytes),
	GBENU_STATS_P1(ale_unknown_bcast),
	GBENU_STATS_P1(ale_unknown_bcast_bytes),
	GBENU_STATS_P1(ale_pol_match),
	GBENU_STATS_P1(ale_pol_match_red),
	GBENU_STATS_P1(ale_pol_match_yellow),
	GBENU_STATS_P1(tx_mem_protect_err),
	GBENU_STATS_P1(tx_pri0_drop),
	GBENU_STATS_P1(tx_pri1_drop),
	GBENU_STATS_P1(tx_pri2_drop),
	GBENU_STATS_P1(tx_pri3_drop),
	GBENU_STATS_P1(tx_pri4_drop),
	GBENU_STATS_P1(tx_pri5_drop),
	GBENU_STATS_P1(tx_pri6_drop),
	GBENU_STATS_P1(tx_pri7_drop),
	GBENU_STATS_P1(tx_pri0_drop_bcnt),
	GBENU_STATS_P1(tx_pri1_drop_bcnt),
	GBENU_STATS_P1(tx_pri2_drop_bcnt),
	GBENU_STATS_P1(tx_pri3_drop_bcnt),
	GBENU_STATS_P1(tx_pri4_drop_bcnt),
	GBENU_STATS_P1(tx_pri5_drop_bcnt),
	GBENU_STATS_P1(tx_pri6_drop_bcnt),
	GBENU_STATS_P1(tx_pri7_drop_bcnt),
	/* GBENU Module 2 */
	GBENU_STATS_P2(rx_good_frames),
	GBENU_STATS_P2(rx_broadcast_frames),
	GBENU_STATS_P2(rx_multicast_frames),
	GBENU_STATS_P2(rx_pause_frames),
	GBENU_STATS_P2(rx_crc_errors),
	GBENU_STATS_P2(rx_align_code_errors),
	GBENU_STATS_P2(rx_oversized_frames),
	GBENU_STATS_P2(rx_jabber_frames),
	GBENU_STATS_P2(rx_undersized_frames),
	GBENU_STATS_P2(rx_fragments),
	GBENU_STATS_P2(ale_drop),
	GBENU_STATS_P2(ale_overrun_drop),
	GBENU_STATS_P2(rx_bytes),
	GBENU_STATS_P2(tx_good_frames),
	GBENU_STATS_P2(tx_broadcast_frames),
	GBENU_STATS_P2(tx_multicast_frames),
	GBENU_STATS_P2(tx_pause_frames),
	GBENU_STATS_P2(tx_deferred_frames),
	GBENU_STATS_P2(tx_collision_frames),
	GBENU_STATS_P2(tx_single_coll_frames),
	GBENU_STATS_P2(tx_mult_coll_frames),
	GBENU_STATS_P2(tx_excessive_collisions),
	GBENU_STATS_P2(tx_late_collisions),
	GBENU_STATS_P2(rx_ipg_error),
	GBENU_STATS_P2(tx_carrier_sense_errors),
	GBENU_STATS_P2(tx_bytes),
	GBENU_STATS_P2(tx_64B_frames),
	GBENU_STATS_P2(tx_65_to_127B_frames),
	GBENU_STATS_P2(tx_128_to_255B_frames),
	GBENU_STATS_P2(tx_256_to_511B_frames),
	GBENU_STATS_P2(tx_512_to_1023B_frames),
	GBENU_STATS_P2(tx_1024B_frames),
	GBENU_STATS_P2(net_bytes),
	GBENU_STATS_P2(rx_bottom_fifo_drop),
	GBENU_STATS_P2(rx_port_mask_drop),
	GBENU_STATS_P2(rx_top_fifo_drop),
	GBENU_STATS_P2(ale_rate_limit_drop),
	GBENU_STATS_P2(ale_vid_ingress_drop),
	GBENU_STATS_P2(ale_da_eq_sa_drop),
	GBENU_STATS_P2(ale_unknown_ucast),
	GBENU_STATS_P2(ale_unknown_ucast_bytes),
	GBENU_STATS_P2(ale_unknown_mcast),
	GBENU_STATS_P2(ale_unknown_mcast_bytes),
	GBENU_STATS_P2(ale_unknown_bcast),
	GBENU_STATS_P2(ale_unknown_bcast_bytes),
	GBENU_STATS_P2(ale_pol_match),
	GBENU_STATS_P2(ale_pol_match_red),
	GBENU_STATS_P2(ale_pol_match_yellow),
	GBENU_STATS_P2(tx_mem_protect_err),
	GBENU_STATS_P2(tx_pri0_drop),
	GBENU_STATS_P2(tx_pri1_drop),
	GBENU_STATS_P2(tx_pri2_drop),
	GBENU_STATS_P2(tx_pri3_drop),
	GBENU_STATS_P2(tx_pri4_drop),
	GBENU_STATS_P2(tx_pri5_drop),
	GBENU_STATS_P2(tx_pri6_drop),
	GBENU_STATS_P2(tx_pri7_drop),
	GBENU_STATS_P2(tx_pri0_drop_bcnt),
	GBENU_STATS_P2(tx_pri1_drop_bcnt),
	GBENU_STATS_P2(tx_pri2_drop_bcnt),
	GBENU_STATS_P2(tx_pri3_drop_bcnt),
	GBENU_STATS_P2(tx_pri4_drop_bcnt),
	GBENU_STATS_P2(tx_pri5_drop_bcnt),
	GBENU_STATS_P2(tx_pri6_drop_bcnt),
	GBENU_STATS_P2(tx_pri7_drop_bcnt),
	/* GBENU Module 3 */
	GBENU_STATS_P3(rx_good_frames),
	GBENU_STATS_P3(rx_broadcast_frames),
	GBENU_STATS_P3(rx_multicast_frames),
	GBENU_STATS_P3(rx_pause_frames),
	GBENU_STATS_P3(rx_crc_errors),
	GBENU_STATS_P3(rx_align_code_errors),
	GBENU_STATS_P3(rx_oversized_frames),
	GBENU_STATS_P3(rx_jabber_frames),
	GBENU_STATS_P3(rx_undersized_frames),
	GBENU_STATS_P3(rx_fragments),
	GBENU_STATS_P3(ale_drop),
	GBENU_STATS_P3(ale_overrun_drop),
	GBENU_STATS_P3(rx_bytes),
	GBENU_STATS_P3(tx_good_frames),
	GBENU_STATS_P3(tx_broadcast_frames),
	GBENU_STATS_P3(tx_multicast_frames),
	GBENU_STATS_P3(tx_pause_frames),
	GBENU_STATS_P3(tx_deferred_frames),
	GBENU_STATS_P3(tx_collision_frames),
	GBENU_STATS_P3(tx_single_coll_frames),
	GBENU_STATS_P3(tx_mult_coll_frames),
	GBENU_STATS_P3(tx_excessive_collisions),
	GBENU_STATS_P3(tx_late_collisions),
	GBENU_STATS_P3(rx_ipg_error),
	GBENU_STATS_P3(tx_carrier_sense_errors),
	GBENU_STATS_P3(tx_bytes),
	GBENU_STATS_P3(tx_64B_frames),
	GBENU_STATS_P3(tx_65_to_127B_frames),
	GBENU_STATS_P3(tx_128_to_255B_frames),
	GBENU_STATS_P3(tx_256_to_511B_frames),
	GBENU_STATS_P3(tx_512_to_1023B_frames),
	GBENU_STATS_P3(tx_1024B_frames),
	GBENU_STATS_P3(net_bytes),
	GBENU_STATS_P3(rx_bottom_fifo_drop),
	GBENU_STATS_P3(rx_port_mask_drop),
	GBENU_STATS_P3(rx_top_fifo_drop),
	GBENU_STATS_P3(ale_rate_limit_drop),
	GBENU_STATS_P3(ale_vid_ingress_drop),
	GBENU_STATS_P3(ale_da_eq_sa_drop),
	GBENU_STATS_P3(ale_unknown_ucast),
	GBENU_STATS_P3(ale_unknown_ucast_bytes),
	GBENU_STATS_P3(ale_unknown_mcast),
	GBENU_STATS_P3(ale_unknown_mcast_bytes),
	GBENU_STATS_P3(ale_unknown_bcast),
	GBENU_STATS_P3(ale_unknown_bcast_bytes),
	GBENU_STATS_P3(ale_pol_match),
	GBENU_STATS_P3(ale_pol_match_red),
	GBENU_STATS_P3(ale_pol_match_yellow),
	GBENU_STATS_P3(tx_mem_protect_err),
	GBENU_STATS_P3(tx_pri0_drop),
	GBENU_STATS_P3(tx_pri1_drop),
	GBENU_STATS_P3(tx_pri2_drop),
	GBENU_STATS_P3(tx_pri3_drop),
	GBENU_STATS_P3(tx_pri4_drop),
	GBENU_STATS_P3(tx_pri5_drop),
	GBENU_STATS_P3(tx_pri6_drop),
	GBENU_STATS_P3(tx_pri7_drop),
	GBENU_STATS_P3(tx_pri0_drop_bcnt),
	GBENU_STATS_P3(tx_pri1_drop_bcnt),
	GBENU_STATS_P3(tx_pri2_drop_bcnt),
	GBENU_STATS_P3(tx_pri3_drop_bcnt),
	GBENU_STATS_P3(tx_pri4_drop_bcnt),
	GBENU_STATS_P3(tx_pri5_drop_bcnt),
	GBENU_STATS_P3(tx_pri6_drop_bcnt),
	GBENU_STATS_P3(tx_pri7_drop_bcnt),
	/* GBENU Module 4 */
	GBENU_STATS_P4(rx_good_frames),
	GBENU_STATS_P4(rx_broadcast_frames),
	GBENU_STATS_P4(rx_multicast_frames),
	GBENU_STATS_P4(rx_pause_frames),
	GBENU_STATS_P4(rx_crc_errors),
	GBENU_STATS_P4(rx_align_code_errors),
	GBENU_STATS_P4(rx_oversized_frames),
	GBENU_STATS_P4(rx_jabber_frames),
	GBENU_STATS_P4(rx_undersized_frames),
	GBENU_STATS_P4(rx_fragments),
	GBENU_STATS_P4(ale_drop),
	GBENU_STATS_P4(ale_overrun_drop),
	GBENU_STATS_P4(rx_bytes),
	GBENU_STATS_P4(tx_good_frames),
	GBENU_STATS_P4(tx_broadcast_frames),
	GBENU_STATS_P4(tx_multicast_frames),
	GBENU_STATS_P4(tx_pause_frames),
	GBENU_STATS_P4(tx_deferred_frames),
	GBENU_STATS_P4(tx_collision_frames),
	GBENU_STATS_P4(tx_single_coll_frames),
	GBENU_STATS_P4(tx_mult_coll_frames),
	GBENU_STATS_P4(tx_excessive_collisions),
	GBENU_STATS_P4(tx_late_collisions),
	GBENU_STATS_P4(rx_ipg_error),
	GBENU_STATS_P4(tx_carrier_sense_errors),
	GBENU_STATS_P4(tx_bytes),
	GBENU_STATS_P4(tx_64B_frames),
	GBENU_STATS_P4(tx_65_to_127B_frames),
	GBENU_STATS_P4(tx_128_to_255B_frames),
	GBENU_STATS_P4(tx_256_to_511B_frames),
	GBENU_STATS_P4(tx_512_to_1023B_frames),
	GBENU_STATS_P4(tx_1024B_frames),
	GBENU_STATS_P4(net_bytes),
	GBENU_STATS_P4(rx_bottom_fifo_drop),
	GBENU_STATS_P4(rx_port_mask_drop),
	GBENU_STATS_P4(rx_top_fifo_drop),
	GBENU_STATS_P4(ale_rate_limit_drop),
	GBENU_STATS_P4(ale_vid_ingress_drop),
	GBENU_STATS_P4(ale_da_eq_sa_drop),
	GBENU_STATS_P4(ale_unknown_ucast),
	GBENU_STATS_P4(ale_unknown_ucast_bytes),
	GBENU_STATS_P4(ale_unknown_mcast),
	GBENU_STATS_P4(ale_unknown_mcast_bytes),
	GBENU_STATS_P4(ale_unknown_bcast),
	GBENU_STATS_P4(ale_unknown_bcast_bytes),
	GBENU_STATS_P4(ale_pol_match),
	GBENU_STATS_P4(ale_pol_match_red),
	GBENU_STATS_P4(ale_pol_match_yellow),
	GBENU_STATS_P4(tx_mem_protect_err),
	GBENU_STATS_P4(tx_pri0_drop),
	GBENU_STATS_P4(tx_pri1_drop),
	GBENU_STATS_P4(tx_pri2_drop),
	GBENU_STATS_P4(tx_pri3_drop),
	GBENU_STATS_P4(tx_pri4_drop),
	GBENU_STATS_P4(tx_pri5_drop),
	GBENU_STATS_P4(tx_pri6_drop),
	GBENU_STATS_P4(tx_pri7_drop),
	GBENU_STATS_P4(tx_pri0_drop_bcnt),
	GBENU_STATS_P4(tx_pri1_drop_bcnt),
	GBENU_STATS_P4(tx_pri2_drop_bcnt),
	GBENU_STATS_P4(tx_pri3_drop_bcnt),
	GBENU_STATS_P4(tx_pri4_drop_bcnt),
	GBENU_STATS_P4(tx_pri5_drop_bcnt),
	GBENU_STATS_P4(tx_pri6_drop_bcnt),
	GBENU_STATS_P4(tx_pri7_drop_bcnt),
	/* GBENU Module 5 */
	GBENU_STATS_P5(rx_good_frames),
	GBENU_STATS_P5(rx_broadcast_frames),
	GBENU_STATS_P5(rx_multicast_frames),
	GBENU_STATS_P5(rx_pause_frames),
	GBENU_STATS_P5(rx_crc_errors),
	GBENU_STATS_P5(rx_align_code_errors),
	GBENU_STATS_P5(rx_oversized_frames),
	GBENU_STATS_P5(rx_jabber_frames),
	GBENU_STATS_P5(rx_undersized_frames),
	GBENU_STATS_P5(rx_fragments),
	GBENU_STATS_P5(ale_drop),
	GBENU_STATS_P5(ale_overrun_drop),
	GBENU_STATS_P5(rx_bytes),
	GBENU_STATS_P5(tx_good_frames),
	GBENU_STATS_P5(tx_broadcast_frames),
	GBENU_STATS_P5(tx_multicast_frames),
	GBENU_STATS_P5(tx_pause_frames),
	GBENU_STATS_P5(tx_deferred_frames),
	GBENU_STATS_P5(tx_collision_frames),
	GBENU_STATS_P5(tx_single_coll_frames),
	GBENU_STATS_P5(tx_mult_coll_frames),
	GBENU_STATS_P5(tx_excessive_collisions),
	GBENU_STATS_P5(tx_late_collisions),
	GBENU_STATS_P5(rx_ipg_error),
	GBENU_STATS_P5(tx_carrier_sense_errors),
	GBENU_STATS_P5(tx_bytes),
  1281. GBENU_STATS_P5(tx_64B_frames),
  1282. GBENU_STATS_P5(tx_65_to_127B_frames),
  1283. GBENU_STATS_P5(tx_128_to_255B_frames),
  1284. GBENU_STATS_P5(tx_256_to_511B_frames),
  1285. GBENU_STATS_P5(tx_512_to_1023B_frames),
  1286. GBENU_STATS_P5(tx_1024B_frames),
  1287. GBENU_STATS_P5(net_bytes),
  1288. GBENU_STATS_P5(rx_bottom_fifo_drop),
  1289. GBENU_STATS_P5(rx_port_mask_drop),
  1290. GBENU_STATS_P5(rx_top_fifo_drop),
  1291. GBENU_STATS_P5(ale_rate_limit_drop),
  1292. GBENU_STATS_P5(ale_vid_ingress_drop),
  1293. GBENU_STATS_P5(ale_da_eq_sa_drop),
  1294. GBENU_STATS_P5(ale_unknown_ucast),
  1295. GBENU_STATS_P5(ale_unknown_ucast_bytes),
  1296. GBENU_STATS_P5(ale_unknown_mcast),
  1297. GBENU_STATS_P5(ale_unknown_mcast_bytes),
  1298. GBENU_STATS_P5(ale_unknown_bcast),
  1299. GBENU_STATS_P5(ale_unknown_bcast_bytes),
  1300. GBENU_STATS_P5(ale_pol_match),
  1301. GBENU_STATS_P5(ale_pol_match_red),
  1302. GBENU_STATS_P5(ale_pol_match_yellow),
  1303. GBENU_STATS_P5(tx_mem_protect_err),
  1304. GBENU_STATS_P5(tx_pri0_drop),
  1305. GBENU_STATS_P5(tx_pri1_drop),
  1306. GBENU_STATS_P5(tx_pri2_drop),
  1307. GBENU_STATS_P5(tx_pri3_drop),
  1308. GBENU_STATS_P5(tx_pri4_drop),
  1309. GBENU_STATS_P5(tx_pri5_drop),
  1310. GBENU_STATS_P5(tx_pri6_drop),
  1311. GBENU_STATS_P5(tx_pri7_drop),
  1312. GBENU_STATS_P5(tx_pri0_drop_bcnt),
  1313. GBENU_STATS_P5(tx_pri1_drop_bcnt),
  1314. GBENU_STATS_P5(tx_pri2_drop_bcnt),
  1315. GBENU_STATS_P5(tx_pri3_drop_bcnt),
  1316. GBENU_STATS_P5(tx_pri4_drop_bcnt),
  1317. GBENU_STATS_P5(tx_pri5_drop_bcnt),
  1318. GBENU_STATS_P5(tx_pri6_drop_bcnt),
  1319. GBENU_STATS_P5(tx_pri7_drop_bcnt),
  1320. /* GBENU Module 6 */
  1321. GBENU_STATS_P6(rx_good_frames),
  1322. GBENU_STATS_P6(rx_broadcast_frames),
  1323. GBENU_STATS_P6(rx_multicast_frames),
  1324. GBENU_STATS_P6(rx_pause_frames),
  1325. GBENU_STATS_P6(rx_crc_errors),
  1326. GBENU_STATS_P6(rx_align_code_errors),
  1327. GBENU_STATS_P6(rx_oversized_frames),
  1328. GBENU_STATS_P6(rx_jabber_frames),
  1329. GBENU_STATS_P6(rx_undersized_frames),
  1330. GBENU_STATS_P6(rx_fragments),
  1331. GBENU_STATS_P6(ale_drop),
  1332. GBENU_STATS_P6(ale_overrun_drop),
  1333. GBENU_STATS_P6(rx_bytes),
  1334. GBENU_STATS_P6(tx_good_frames),
  1335. GBENU_STATS_P6(tx_broadcast_frames),
  1336. GBENU_STATS_P6(tx_multicast_frames),
  1337. GBENU_STATS_P6(tx_pause_frames),
  1338. GBENU_STATS_P6(tx_deferred_frames),
  1339. GBENU_STATS_P6(tx_collision_frames),
  1340. GBENU_STATS_P6(tx_single_coll_frames),
  1341. GBENU_STATS_P6(tx_mult_coll_frames),
  1342. GBENU_STATS_P6(tx_excessive_collisions),
  1343. GBENU_STATS_P6(tx_late_collisions),
  1344. GBENU_STATS_P6(rx_ipg_error),
  1345. GBENU_STATS_P6(tx_carrier_sense_errors),
  1346. GBENU_STATS_P6(tx_bytes),
  1347. GBENU_STATS_P6(tx_64B_frames),
  1348. GBENU_STATS_P6(tx_65_to_127B_frames),
  1349. GBENU_STATS_P6(tx_128_to_255B_frames),
  1350. GBENU_STATS_P6(tx_256_to_511B_frames),
  1351. GBENU_STATS_P6(tx_512_to_1023B_frames),
  1352. GBENU_STATS_P6(tx_1024B_frames),
  1353. GBENU_STATS_P6(net_bytes),
  1354. GBENU_STATS_P6(rx_bottom_fifo_drop),
  1355. GBENU_STATS_P6(rx_port_mask_drop),
  1356. GBENU_STATS_P6(rx_top_fifo_drop),
  1357. GBENU_STATS_P6(ale_rate_limit_drop),
  1358. GBENU_STATS_P6(ale_vid_ingress_drop),
  1359. GBENU_STATS_P6(ale_da_eq_sa_drop),
  1360. GBENU_STATS_P6(ale_unknown_ucast),
  1361. GBENU_STATS_P6(ale_unknown_ucast_bytes),
  1362. GBENU_STATS_P6(ale_unknown_mcast),
  1363. GBENU_STATS_P6(ale_unknown_mcast_bytes),
  1364. GBENU_STATS_P6(ale_unknown_bcast),
  1365. GBENU_STATS_P6(ale_unknown_bcast_bytes),
  1366. GBENU_STATS_P6(ale_pol_match),
  1367. GBENU_STATS_P6(ale_pol_match_red),
  1368. GBENU_STATS_P6(ale_pol_match_yellow),
  1369. GBENU_STATS_P6(tx_mem_protect_err),
  1370. GBENU_STATS_P6(tx_pri0_drop),
  1371. GBENU_STATS_P6(tx_pri1_drop),
  1372. GBENU_STATS_P6(tx_pri2_drop),
  1373. GBENU_STATS_P6(tx_pri3_drop),
  1374. GBENU_STATS_P6(tx_pri4_drop),
  1375. GBENU_STATS_P6(tx_pri5_drop),
  1376. GBENU_STATS_P6(tx_pri6_drop),
  1377. GBENU_STATS_P6(tx_pri7_drop),
  1378. GBENU_STATS_P6(tx_pri0_drop_bcnt),
  1379. GBENU_STATS_P6(tx_pri1_drop_bcnt),
  1380. GBENU_STATS_P6(tx_pri2_drop_bcnt),
  1381. GBENU_STATS_P6(tx_pri3_drop_bcnt),
  1382. GBENU_STATS_P6(tx_pri4_drop_bcnt),
  1383. GBENU_STATS_P6(tx_pri5_drop_bcnt),
  1384. GBENU_STATS_P6(tx_pri6_drop_bcnt),
  1385. GBENU_STATS_P6(tx_pri7_drop_bcnt),
  1386. /* GBENU Module 7 */
  1387. GBENU_STATS_P7(rx_good_frames),
  1388. GBENU_STATS_P7(rx_broadcast_frames),
  1389. GBENU_STATS_P7(rx_multicast_frames),
  1390. GBENU_STATS_P7(rx_pause_frames),
  1391. GBENU_STATS_P7(rx_crc_errors),
  1392. GBENU_STATS_P7(rx_align_code_errors),
  1393. GBENU_STATS_P7(rx_oversized_frames),
  1394. GBENU_STATS_P7(rx_jabber_frames),
  1395. GBENU_STATS_P7(rx_undersized_frames),
  1396. GBENU_STATS_P7(rx_fragments),
  1397. GBENU_STATS_P7(ale_drop),
  1398. GBENU_STATS_P7(ale_overrun_drop),
  1399. GBENU_STATS_P7(rx_bytes),
  1400. GBENU_STATS_P7(tx_good_frames),
  1401. GBENU_STATS_P7(tx_broadcast_frames),
  1402. GBENU_STATS_P7(tx_multicast_frames),
  1403. GBENU_STATS_P7(tx_pause_frames),
  1404. GBENU_STATS_P7(tx_deferred_frames),
  1405. GBENU_STATS_P7(tx_collision_frames),
  1406. GBENU_STATS_P7(tx_single_coll_frames),
  1407. GBENU_STATS_P7(tx_mult_coll_frames),
  1408. GBENU_STATS_P7(tx_excessive_collisions),
  1409. GBENU_STATS_P7(tx_late_collisions),
  1410. GBENU_STATS_P7(rx_ipg_error),
  1411. GBENU_STATS_P7(tx_carrier_sense_errors),
  1412. GBENU_STATS_P7(tx_bytes),
  1413. GBENU_STATS_P7(tx_64B_frames),
  1414. GBENU_STATS_P7(tx_65_to_127B_frames),
  1415. GBENU_STATS_P7(tx_128_to_255B_frames),
  1416. GBENU_STATS_P7(tx_256_to_511B_frames),
  1417. GBENU_STATS_P7(tx_512_to_1023B_frames),
  1418. GBENU_STATS_P7(tx_1024B_frames),
  1419. GBENU_STATS_P7(net_bytes),
  1420. GBENU_STATS_P7(rx_bottom_fifo_drop),
  1421. GBENU_STATS_P7(rx_port_mask_drop),
  1422. GBENU_STATS_P7(rx_top_fifo_drop),
  1423. GBENU_STATS_P7(ale_rate_limit_drop),
  1424. GBENU_STATS_P7(ale_vid_ingress_drop),
  1425. GBENU_STATS_P7(ale_da_eq_sa_drop),
  1426. GBENU_STATS_P7(ale_unknown_ucast),
  1427. GBENU_STATS_P7(ale_unknown_ucast_bytes),
  1428. GBENU_STATS_P7(ale_unknown_mcast),
  1429. GBENU_STATS_P7(ale_unknown_mcast_bytes),
  1430. GBENU_STATS_P7(ale_unknown_bcast),
  1431. GBENU_STATS_P7(ale_unknown_bcast_bytes),
  1432. GBENU_STATS_P7(ale_pol_match),
  1433. GBENU_STATS_P7(ale_pol_match_red),
  1434. GBENU_STATS_P7(ale_pol_match_yellow),
  1435. GBENU_STATS_P7(tx_mem_protect_err),
  1436. GBENU_STATS_P7(tx_pri0_drop),
  1437. GBENU_STATS_P7(tx_pri1_drop),
  1438. GBENU_STATS_P7(tx_pri2_drop),
  1439. GBENU_STATS_P7(tx_pri3_drop),
  1440. GBENU_STATS_P7(tx_pri4_drop),
  1441. GBENU_STATS_P7(tx_pri5_drop),
  1442. GBENU_STATS_P7(tx_pri6_drop),
  1443. GBENU_STATS_P7(tx_pri7_drop),
  1444. GBENU_STATS_P7(tx_pri0_drop_bcnt),
  1445. GBENU_STATS_P7(tx_pri1_drop_bcnt),
  1446. GBENU_STATS_P7(tx_pri2_drop_bcnt),
  1447. GBENU_STATS_P7(tx_pri3_drop_bcnt),
  1448. GBENU_STATS_P7(tx_pri4_drop_bcnt),
  1449. GBENU_STATS_P7(tx_pri5_drop_bcnt),
  1450. GBENU_STATS_P7(tx_pri6_drop_bcnt),
  1451. GBENU_STATS_P7(tx_pri7_drop_bcnt),
  1452. /* GBENU Module 8 */
  1453. GBENU_STATS_P8(rx_good_frames),
  1454. GBENU_STATS_P8(rx_broadcast_frames),
  1455. GBENU_STATS_P8(rx_multicast_frames),
  1456. GBENU_STATS_P8(rx_pause_frames),
  1457. GBENU_STATS_P8(rx_crc_errors),
  1458. GBENU_STATS_P8(rx_align_code_errors),
  1459. GBENU_STATS_P8(rx_oversized_frames),
  1460. GBENU_STATS_P8(rx_jabber_frames),
  1461. GBENU_STATS_P8(rx_undersized_frames),
  1462. GBENU_STATS_P8(rx_fragments),
  1463. GBENU_STATS_P8(ale_drop),
  1464. GBENU_STATS_P8(ale_overrun_drop),
  1465. GBENU_STATS_P8(rx_bytes),
  1466. GBENU_STATS_P8(tx_good_frames),
  1467. GBENU_STATS_P8(tx_broadcast_frames),
  1468. GBENU_STATS_P8(tx_multicast_frames),
  1469. GBENU_STATS_P8(tx_pause_frames),
  1470. GBENU_STATS_P8(tx_deferred_frames),
  1471. GBENU_STATS_P8(tx_collision_frames),
  1472. GBENU_STATS_P8(tx_single_coll_frames),
  1473. GBENU_STATS_P8(tx_mult_coll_frames),
  1474. GBENU_STATS_P8(tx_excessive_collisions),
  1475. GBENU_STATS_P8(tx_late_collisions),
  1476. GBENU_STATS_P8(rx_ipg_error),
  1477. GBENU_STATS_P8(tx_carrier_sense_errors),
  1478. GBENU_STATS_P8(tx_bytes),
  1479. GBENU_STATS_P8(tx_64B_frames),
  1480. GBENU_STATS_P8(tx_65_to_127B_frames),
  1481. GBENU_STATS_P8(tx_128_to_255B_frames),
  1482. GBENU_STATS_P8(tx_256_to_511B_frames),
  1483. GBENU_STATS_P8(tx_512_to_1023B_frames),
  1484. GBENU_STATS_P8(tx_1024B_frames),
  1485. GBENU_STATS_P8(net_bytes),
  1486. GBENU_STATS_P8(rx_bottom_fifo_drop),
  1487. GBENU_STATS_P8(rx_port_mask_drop),
  1488. GBENU_STATS_P8(rx_top_fifo_drop),
  1489. GBENU_STATS_P8(ale_rate_limit_drop),
  1490. GBENU_STATS_P8(ale_vid_ingress_drop),
  1491. GBENU_STATS_P8(ale_da_eq_sa_drop),
  1492. GBENU_STATS_P8(ale_unknown_ucast),
  1493. GBENU_STATS_P8(ale_unknown_ucast_bytes),
  1494. GBENU_STATS_P8(ale_unknown_mcast),
  1495. GBENU_STATS_P8(ale_unknown_mcast_bytes),
  1496. GBENU_STATS_P8(ale_unknown_bcast),
  1497. GBENU_STATS_P8(ale_unknown_bcast_bytes),
  1498. GBENU_STATS_P8(ale_pol_match),
  1499. GBENU_STATS_P8(ale_pol_match_red),
  1500. GBENU_STATS_P8(ale_pol_match_yellow),
  1501. GBENU_STATS_P8(tx_mem_protect_err),
  1502. GBENU_STATS_P8(tx_pri0_drop),
  1503. GBENU_STATS_P8(tx_pri1_drop),
  1504. GBENU_STATS_P8(tx_pri2_drop),
  1505. GBENU_STATS_P8(tx_pri3_drop),
  1506. GBENU_STATS_P8(tx_pri4_drop),
  1507. GBENU_STATS_P8(tx_pri5_drop),
  1508. GBENU_STATS_P8(tx_pri6_drop),
  1509. GBENU_STATS_P8(tx_pri7_drop),
  1510. GBENU_STATS_P8(tx_pri0_drop_bcnt),
  1511. GBENU_STATS_P8(tx_pri1_drop_bcnt),
  1512. GBENU_STATS_P8(tx_pri2_drop_bcnt),
  1513. GBENU_STATS_P8(tx_pri3_drop_bcnt),
  1514. GBENU_STATS_P8(tx_pri4_drop_bcnt),
  1515. GBENU_STATS_P8(tx_pri5_drop_bcnt),
  1516. GBENU_STATS_P8(tx_pri6_drop_bcnt),
  1517. GBENU_STATS_P8(tx_pri7_drop_bcnt),
  1518. };
  1519. #define XGBE_STATS0_INFO(field) \
  1520. { \
  1521. "GBE_0:"#field, XGBE_STATS0_MODULE, \
  1522. FIELD_SIZEOF(struct xgbe_hw_stats, field), \
  1523. offsetof(struct xgbe_hw_stats, field) \
  1524. }
  1525. #define XGBE_STATS1_INFO(field) \
  1526. { \
  1527. "GBE_1:"#field, XGBE_STATS1_MODULE, \
  1528. FIELD_SIZEOF(struct xgbe_hw_stats, field), \
  1529. offsetof(struct xgbe_hw_stats, field) \
  1530. }
  1531. #define XGBE_STATS2_INFO(field) \
  1532. { \
  1533. "GBE_2:"#field, XGBE_STATS2_MODULE, \
  1534. FIELD_SIZEOF(struct xgbe_hw_stats, field), \
  1535. offsetof(struct xgbe_hw_stats, field) \
  1536. }
  1537. static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
  1538. /* GBE module 0 */
  1539. XGBE_STATS0_INFO(rx_good_frames),
  1540. XGBE_STATS0_INFO(rx_broadcast_frames),
  1541. XGBE_STATS0_INFO(rx_multicast_frames),
  1542. XGBE_STATS0_INFO(rx_oversized_frames),
  1543. XGBE_STATS0_INFO(rx_undersized_frames),
  1544. XGBE_STATS0_INFO(overrun_type4),
  1545. XGBE_STATS0_INFO(overrun_type5),
  1546. XGBE_STATS0_INFO(rx_bytes),
  1547. XGBE_STATS0_INFO(tx_good_frames),
  1548. XGBE_STATS0_INFO(tx_broadcast_frames),
  1549. XGBE_STATS0_INFO(tx_multicast_frames),
  1550. XGBE_STATS0_INFO(tx_bytes),
  1551. XGBE_STATS0_INFO(tx_64byte_frames),
  1552. XGBE_STATS0_INFO(tx_65_to_127byte_frames),
  1553. XGBE_STATS0_INFO(tx_128_to_255byte_frames),
  1554. XGBE_STATS0_INFO(tx_256_to_511byte_frames),
  1555. XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
  1556. XGBE_STATS0_INFO(tx_1024byte_frames),
  1557. XGBE_STATS0_INFO(net_bytes),
  1558. XGBE_STATS0_INFO(rx_sof_overruns),
  1559. XGBE_STATS0_INFO(rx_mof_overruns),
  1560. XGBE_STATS0_INFO(rx_dma_overruns),
  1561. /* XGBE module 1 */
  1562. XGBE_STATS1_INFO(rx_good_frames),
  1563. XGBE_STATS1_INFO(rx_broadcast_frames),
  1564. XGBE_STATS1_INFO(rx_multicast_frames),
  1565. XGBE_STATS1_INFO(rx_pause_frames),
  1566. XGBE_STATS1_INFO(rx_crc_errors),
  1567. XGBE_STATS1_INFO(rx_align_code_errors),
  1568. XGBE_STATS1_INFO(rx_oversized_frames),
  1569. XGBE_STATS1_INFO(rx_jabber_frames),
  1570. XGBE_STATS1_INFO(rx_undersized_frames),
  1571. XGBE_STATS1_INFO(rx_fragments),
  1572. XGBE_STATS1_INFO(overrun_type4),
  1573. XGBE_STATS1_INFO(overrun_type5),
  1574. XGBE_STATS1_INFO(rx_bytes),
  1575. XGBE_STATS1_INFO(tx_good_frames),
  1576. XGBE_STATS1_INFO(tx_broadcast_frames),
  1577. XGBE_STATS1_INFO(tx_multicast_frames),
  1578. XGBE_STATS1_INFO(tx_pause_frames),
  1579. XGBE_STATS1_INFO(tx_deferred_frames),
  1580. XGBE_STATS1_INFO(tx_collision_frames),
  1581. XGBE_STATS1_INFO(tx_single_coll_frames),
  1582. XGBE_STATS1_INFO(tx_mult_coll_frames),
  1583. XGBE_STATS1_INFO(tx_excessive_collisions),
  1584. XGBE_STATS1_INFO(tx_late_collisions),
  1585. XGBE_STATS1_INFO(tx_underrun),
  1586. XGBE_STATS1_INFO(tx_carrier_sense_errors),
  1587. XGBE_STATS1_INFO(tx_bytes),
  1588. XGBE_STATS1_INFO(tx_64byte_frames),
  1589. XGBE_STATS1_INFO(tx_65_to_127byte_frames),
  1590. XGBE_STATS1_INFO(tx_128_to_255byte_frames),
  1591. XGBE_STATS1_INFO(tx_256_to_511byte_frames),
  1592. XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
  1593. XGBE_STATS1_INFO(tx_1024byte_frames),
  1594. XGBE_STATS1_INFO(net_bytes),
  1595. XGBE_STATS1_INFO(rx_sof_overruns),
  1596. XGBE_STATS1_INFO(rx_mof_overruns),
  1597. XGBE_STATS1_INFO(rx_dma_overruns),
  1598. /* XGBE module 2 */
  1599. XGBE_STATS2_INFO(rx_good_frames),
  1600. XGBE_STATS2_INFO(rx_broadcast_frames),
  1601. XGBE_STATS2_INFO(rx_multicast_frames),
  1602. XGBE_STATS2_INFO(rx_pause_frames),
  1603. XGBE_STATS2_INFO(rx_crc_errors),
  1604. XGBE_STATS2_INFO(rx_align_code_errors),
  1605. XGBE_STATS2_INFO(rx_oversized_frames),
  1606. XGBE_STATS2_INFO(rx_jabber_frames),
  1607. XGBE_STATS2_INFO(rx_undersized_frames),
  1608. XGBE_STATS2_INFO(rx_fragments),
  1609. XGBE_STATS2_INFO(overrun_type4),
  1610. XGBE_STATS2_INFO(overrun_type5),
  1611. XGBE_STATS2_INFO(rx_bytes),
  1612. XGBE_STATS2_INFO(tx_good_frames),
  1613. XGBE_STATS2_INFO(tx_broadcast_frames),
  1614. XGBE_STATS2_INFO(tx_multicast_frames),
  1615. XGBE_STATS2_INFO(tx_pause_frames),
  1616. XGBE_STATS2_INFO(tx_deferred_frames),
  1617. XGBE_STATS2_INFO(tx_collision_frames),
  1618. XGBE_STATS2_INFO(tx_single_coll_frames),
  1619. XGBE_STATS2_INFO(tx_mult_coll_frames),
  1620. XGBE_STATS2_INFO(tx_excessive_collisions),
  1621. XGBE_STATS2_INFO(tx_late_collisions),
  1622. XGBE_STATS2_INFO(tx_underrun),
  1623. XGBE_STATS2_INFO(tx_carrier_sense_errors),
  1624. XGBE_STATS2_INFO(tx_bytes),
  1625. XGBE_STATS2_INFO(tx_64byte_frames),
  1626. XGBE_STATS2_INFO(tx_65_to_127byte_frames),
  1627. XGBE_STATS2_INFO(tx_128_to_255byte_frames),
  1628. XGBE_STATS2_INFO(tx_256_to_511byte_frames),
  1629. XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
  1630. XGBE_STATS2_INFO(tx_1024byte_frames),
  1631. XGBE_STATS2_INFO(net_bytes),
  1632. XGBE_STATS2_INFO(rx_sof_overruns),
  1633. XGBE_STATS2_INFO(rx_mof_overruns),
  1634. XGBE_STATS2_INFO(rx_dma_overruns),
  1635. };
  1636. #define for_each_intf(i, priv) \
  1637. list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
  1638. #define for_each_sec_slave(slave, priv) \
  1639. list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
  1640. #define first_sec_slave(priv) \
  1641. list_first_entry(&priv->secondary_slaves, \
  1642. struct gbe_slave, slave_list)
  1643. static void keystone_get_drvinfo(struct net_device *ndev,
  1644. struct ethtool_drvinfo *info)
  1645. {
  1646. strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
  1647. strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
  1648. }
  1649. static u32 keystone_get_msglevel(struct net_device *ndev)
  1650. {
  1651. struct netcp_intf *netcp = netdev_priv(ndev);
  1652. return netcp->msg_enable;
  1653. }
  1654. static void keystone_set_msglevel(struct net_device *ndev, u32 value)
  1655. {
  1656. struct netcp_intf *netcp = netdev_priv(ndev);
  1657. netcp->msg_enable = value;
  1658. }
  1659. static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
  1660. {
  1661. struct gbe_intf *gbe_intf;
  1662. gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
  1663. if (!gbe_intf)
  1664. gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
  1665. return gbe_intf;
  1666. }
  1667. static void keystone_get_stat_strings(struct net_device *ndev,
  1668. uint32_t stringset, uint8_t *data)
  1669. {
  1670. struct netcp_intf *netcp = netdev_priv(ndev);
  1671. struct gbe_intf *gbe_intf;
  1672. struct gbe_priv *gbe_dev;
  1673. int i;
  1674. gbe_intf = keystone_get_intf_data(netcp);
  1675. if (!gbe_intf)
  1676. return;
  1677. gbe_dev = gbe_intf->gbe_dev;
  1678. switch (stringset) {
  1679. case ETH_SS_STATS:
  1680. for (i = 0; i < gbe_dev->num_et_stats; i++) {
  1681. memcpy(data, gbe_dev->et_stats[i].desc,
  1682. ETH_GSTRING_LEN);
  1683. data += ETH_GSTRING_LEN;
  1684. }
  1685. break;
  1686. case ETH_SS_TEST:
  1687. break;
  1688. }
  1689. }
  1690. static int keystone_get_sset_count(struct net_device *ndev, int stringset)
  1691. {
  1692. struct netcp_intf *netcp = netdev_priv(ndev);
  1693. struct gbe_intf *gbe_intf;
  1694. struct gbe_priv *gbe_dev;
  1695. gbe_intf = keystone_get_intf_data(netcp);
  1696. if (!gbe_intf)
  1697. return -EINVAL;
  1698. gbe_dev = gbe_intf->gbe_dev;
  1699. switch (stringset) {
  1700. case ETH_SS_TEST:
  1701. return 0;
  1702. case ETH_SS_STATS:
  1703. return gbe_dev->num_et_stats;
  1704. default:
  1705. return -EINVAL;
  1706. }
  1707. }
  1708. static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
  1709. {
  1710. void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
  1711. u32 __iomem *p_stats_entry;
  1712. int i;
  1713. for (i = 0; i < gbe_dev->num_et_stats; i++) {
  1714. if (gbe_dev->et_stats[i].type == stats_mod) {
  1715. p_stats_entry = base + gbe_dev->et_stats[i].offset;
  1716. gbe_dev->hw_stats[i] = 0;
  1717. gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
  1718. }
  1719. }
  1720. }
  1721. static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
  1722. int et_stats_entry)
  1723. {
  1724. void __iomem *base = NULL;
  1725. u32 __iomem *p_stats_entry;
  1726. u32 curr, delta;
  1727. /* The hw_stats_regs pointers are already
  1728. * properly set to point to the right base:
  1729. */
  1730. base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
  1731. p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
  1732. curr = readl(p_stats_entry);
  1733. delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
  1734. gbe_dev->hw_stats_prev[et_stats_entry] = curr;
  1735. gbe_dev->hw_stats[et_stats_entry] += delta;
  1736. }
  1737. static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
  1738. {
  1739. int i;
  1740. for (i = 0; i < gbe_dev->num_et_stats; i++) {
  1741. gbe_update_hw_stats_entry(gbe_dev, i);
  1742. if (data)
  1743. data[i] = gbe_dev->hw_stats[i];
  1744. }
  1745. }
  1746. static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
  1747. int stats_mod)
  1748. {
  1749. u32 val;
  1750. val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
  1751. switch (stats_mod) {
  1752. case GBE_STATSA_MODULE:
  1753. case GBE_STATSB_MODULE:
  1754. val &= ~GBE_STATS_CD_SEL;
  1755. break;
  1756. case GBE_STATSC_MODULE:
  1757. case GBE_STATSD_MODULE:
  1758. val |= GBE_STATS_CD_SEL;
  1759. break;
  1760. default:
  1761. return;
  1762. }
  1763. /* make the stat module visible */
  1764. writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
  1765. }
  1766. static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
  1767. {
  1768. gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
  1769. gbe_reset_mod_stats(gbe_dev, stats_mod);
  1770. }
  1771. static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
  1772. {
  1773. u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
  1774. int et_entry, j, pair;
  1775. for (pair = 0; pair < 2; pair++) {
  1776. gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
  1777. GBE_STATSC_MODULE :
  1778. GBE_STATSA_MODULE));
  1779. for (j = 0; j < half_num_et_stats; j++) {
  1780. et_entry = pair * half_num_et_stats + j;
  1781. gbe_update_hw_stats_entry(gbe_dev, et_entry);
  1782. if (data)
  1783. data[et_entry] = gbe_dev->hw_stats[et_entry];
  1784. }
  1785. }
  1786. }
  1787. static void keystone_get_ethtool_stats(struct net_device *ndev,
  1788. struct ethtool_stats *stats,
  1789. uint64_t *data)
  1790. {
  1791. struct netcp_intf *netcp = netdev_priv(ndev);
  1792. struct gbe_intf *gbe_intf;
  1793. struct gbe_priv *gbe_dev;
  1794. gbe_intf = keystone_get_intf_data(netcp);
  1795. if (!gbe_intf)
  1796. return;
  1797. gbe_dev = gbe_intf->gbe_dev;
  1798. spin_lock_bh(&gbe_dev->hw_stats_lock);
  1799. if (IS_SS_ID_VER_14(gbe_dev))
  1800. gbe_update_stats_ver14(gbe_dev, data);
  1801. else
  1802. gbe_update_stats(gbe_dev, data);
  1803. spin_unlock_bh(&gbe_dev->hw_stats_lock);
  1804. }
  1805. static int keystone_get_link_ksettings(struct net_device *ndev,
  1806. struct ethtool_link_ksettings *cmd)
  1807. {
  1808. struct netcp_intf *netcp = netdev_priv(ndev);
  1809. struct phy_device *phy = ndev->phydev;
  1810. struct gbe_intf *gbe_intf;
  1811. if (!phy)
  1812. return -EINVAL;
  1813. gbe_intf = keystone_get_intf_data(netcp);
  1814. if (!gbe_intf)
  1815. return -EINVAL;
  1816. if (!gbe_intf->slave)
  1817. return -EINVAL;
  1818. phy_ethtool_ksettings_get(phy, cmd);
  1819. cmd->base.port = gbe_intf->slave->phy_port_t;
  1820. return 0;
  1821. }
  1822. static int keystone_set_link_ksettings(struct net_device *ndev,
  1823. const struct ethtool_link_ksettings *cmd)
  1824. {
  1825. struct netcp_intf *netcp = netdev_priv(ndev);
  1826. struct phy_device *phy = ndev->phydev;
  1827. struct gbe_intf *gbe_intf;
  1828. u8 port = cmd->base.port;
  1829. u32 advertising, supported;
  1830. u32 features;
  1831. ethtool_convert_link_mode_to_legacy_u32(&advertising,
  1832. cmd->link_modes.advertising);
  1833. ethtool_convert_link_mode_to_legacy_u32(&supported,
  1834. cmd->link_modes.supported);
  1835. features = advertising & supported;
  1836. if (!phy)
  1837. return -EINVAL;
  1838. gbe_intf = keystone_get_intf_data(netcp);
  1839. if (!gbe_intf)
  1840. return -EINVAL;
  1841. if (!gbe_intf->slave)
  1842. return -EINVAL;
  1843. if (port != gbe_intf->slave->phy_port_t) {
  1844. if ((port == PORT_TP) && !(features & ADVERTISED_TP))
  1845. return -EINVAL;
  1846. if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
  1847. return -EINVAL;
  1848. if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
  1849. return -EINVAL;
  1850. if ((port == PORT_MII) && !(features & ADVERTISED_MII))
  1851. return -EINVAL;
  1852. if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
  1853. return -EINVAL;
  1854. }
  1855. gbe_intf->slave->phy_port_t = port;
  1856. return phy_ethtool_ksettings_set(phy, cmd);
  1857. }
  1858. #if IS_ENABLED(CONFIG_TI_CPTS)
  1859. static int keystone_get_ts_info(struct net_device *ndev,
  1860. struct ethtool_ts_info *info)
  1861. {
  1862. struct netcp_intf *netcp = netdev_priv(ndev);
  1863. struct gbe_intf *gbe_intf;
  1864. gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
  1865. if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
  1866. return -EINVAL;
  1867. info->so_timestamping =
  1868. SOF_TIMESTAMPING_TX_HARDWARE |
  1869. SOF_TIMESTAMPING_TX_SOFTWARE |
  1870. SOF_TIMESTAMPING_RX_HARDWARE |
  1871. SOF_TIMESTAMPING_RX_SOFTWARE |
  1872. SOF_TIMESTAMPING_SOFTWARE |
  1873. SOF_TIMESTAMPING_RAW_HARDWARE;
  1874. info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
  1875. info->tx_types =
  1876. (1 << HWTSTAMP_TX_OFF) |
  1877. (1 << HWTSTAMP_TX_ON);
  1878. info->rx_filters =
  1879. (1 << HWTSTAMP_FILTER_NONE) |
  1880. (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
  1881. (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
  1882. return 0;
  1883. }
  1884. #else
  1885. static int keystone_get_ts_info(struct net_device *ndev,
  1886. struct ethtool_ts_info *info)
  1887. {
  1888. info->so_timestamping =
  1889. SOF_TIMESTAMPING_TX_SOFTWARE |
  1890. SOF_TIMESTAMPING_RX_SOFTWARE |
  1891. SOF_TIMESTAMPING_SOFTWARE;
  1892. info->phc_index = -1;
  1893. info->tx_types = 0;
  1894. info->rx_filters = 0;
  1895. return 0;
  1896. }
  1897. #endif /* CONFIG_TI_CPTS */
  1898. static const struct ethtool_ops keystone_ethtool_ops = {
  1899. .get_drvinfo = keystone_get_drvinfo,
  1900. .get_link = ethtool_op_get_link,
  1901. .get_msglevel = keystone_get_msglevel,
  1902. .set_msglevel = keystone_set_msglevel,
  1903. .get_strings = keystone_get_stat_strings,
  1904. .get_sset_count = keystone_get_sset_count,
  1905. .get_ethtool_stats = keystone_get_ethtool_stats,
  1906. .get_link_ksettings = keystone_get_link_ksettings,
  1907. .set_link_ksettings = keystone_set_link_ksettings,
  1908. .get_ts_info = keystone_get_ts_info,
  1909. };
  1910. static void gbe_set_slave_mac(struct gbe_slave *slave,
  1911. struct gbe_intf *gbe_intf)
  1912. {
  1913. struct net_device *ndev = gbe_intf->ndev;
  1914. writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
  1915. writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
  1916. }
  1917. static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
  1918. {
  1919. if (priv->host_port == 0)
  1920. return slave_num + 1;
  1921. return slave_num;
  1922. }
  1923. static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
  1924. struct net_device *ndev,
  1925. struct gbe_slave *slave,
  1926. int up)
  1927. {
  1928. struct phy_device *phy = slave->phy;
  1929. u32 mac_control = 0;
  1930. if (up) {
  1931. mac_control = slave->mac_control;
  1932. if (phy && (phy->speed == SPEED_1000)) {
  1933. mac_control |= MACSL_GIG_MODE;
  1934. mac_control &= ~MACSL_XGIG_MODE;
  1935. } else if (phy && (phy->speed == SPEED_10000)) {
  1936. mac_control |= MACSL_XGIG_MODE;
  1937. mac_control &= ~MACSL_GIG_MODE;
  1938. }
  1939. writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
  1940. mac_control));
  1941. cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
  1942. ALE_PORT_STATE,
  1943. ALE_PORT_STATE_FORWARD);
  1944. if (ndev && slave->open &&
  1945. ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
  1946. (slave->link_interface != RGMII_LINK_MAC_PHY) &&
  1947. (slave->link_interface != XGMII_LINK_MAC_PHY)))
  1948. netif_carrier_on(ndev);
  1949. } else {
  1950. writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
  1951. mac_control));
  1952. cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
  1953. ALE_PORT_STATE,
  1954. ALE_PORT_STATE_DISABLE);
  1955. if (ndev &&
  1956. ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
  1957. (slave->link_interface != RGMII_LINK_MAC_PHY) &&
  1958. (slave->link_interface != XGMII_LINK_MAC_PHY)))
  1959. netif_carrier_off(ndev);
  1960. }
  1961. if (phy)
  1962. phy_print_status(phy);
  1963. }
  1964. static bool gbe_phy_link_status(struct gbe_slave *slave)
  1965. {
  1966. return !slave->phy || slave->phy->link;
  1967. }
  1968. #define RGMII_REG_STATUS_LINK BIT(0)
  1969. static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
  1970. {
  1971. u32 val = 0;
  1972. val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
  1973. *status = !!(val & RGMII_REG_STATUS_LINK);
  1974. }
  1975. static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
  1976. struct gbe_slave *slave,
  1977. struct net_device *ndev)
  1978. {
  1979. bool sw_link_state = true, phy_link_state;
  1980. int sp = slave->slave_num, link_state;
  1981. if (!slave->open)
  1982. return;
  1983. if (SLAVE_LINK_IS_RGMII(slave))
  1984. netcp_2u_rgmii_get_port_link(gbe_dev,
  1985. &sw_link_state);
  1986. if (SLAVE_LINK_IS_SGMII(slave))
  1987. sw_link_state =
  1988. netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
  1989. phy_link_state = gbe_phy_link_status(slave);
  1990. link_state = phy_link_state & sw_link_state;
  1991. if (atomic_xchg(&slave->link_state, link_state) != link_state)
  1992. netcp_ethss_link_state_action(gbe_dev, ndev, slave,
  1993. link_state);
  1994. }
  1995. static void xgbe_adjust_link(struct net_device *ndev)
  1996. {
  1997. struct netcp_intf *netcp = netdev_priv(ndev);
  1998. struct gbe_intf *gbe_intf;
  1999. gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
  2000. if (!gbe_intf)
  2001. return;
  2002. netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
  2003. ndev);
  2004. }
  2005. static void gbe_adjust_link(struct net_device *ndev)
  2006. {
  2007. struct netcp_intf *netcp = netdev_priv(ndev);
  2008. struct gbe_intf *gbe_intf;
  2009. gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
  2010. if (!gbe_intf)
  2011. return;
  2012. netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
  2013. ndev);
  2014. }
  2015. static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
  2016. {
  2017. struct gbe_priv *gbe_dev = netdev_priv(ndev);
  2018. struct gbe_slave *slave;
  2019. for_each_sec_slave(slave, gbe_dev)
  2020. netcp_ethss_update_link_state(gbe_dev, slave, NULL);
  2021. }
  2022. /* Reset EMAC
  2023. * Soft reset is set and polled until clear, or until a timeout occurs
  2024. */
  2025. static int gbe_port_reset(struct gbe_slave *slave)
  2026. {
  2027. u32 i, v;
  2028. /* Set the soft reset bit */
  2029. writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
  2030. /* Wait for the bit to clear */
  2031. for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
  2032. v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
  2033. if ((v & SOFT_RESET_MASK) != SOFT_RESET)
  2034. return 0;
  2035. }
  2036. /* Timeout on the reset */
  2037. return GMACSL_RET_WARN_RESET_INCOMPLETE;
  2038. }
  2039. /* Configure EMAC */
  2040. static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
  2041. int max_rx_len)
  2042. {
  2043. void __iomem *rx_maxlen_reg;
  2044. u32 xgmii_mode;
  2045. if (max_rx_len > NETCP_MAX_FRAME_SIZE)
  2046. max_rx_len = NETCP_MAX_FRAME_SIZE;
  2047. /* Enable correct MII mode at SS level */
  2048. if (IS_SS_ID_XGBE(gbe_dev) &&
  2049. (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
  2050. xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
  2051. xgmii_mode |= (1 << slave->slave_num);
  2052. writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
  2053. }
  2054. if (IS_SS_ID_MU(gbe_dev))
  2055. rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
  2056. else
  2057. rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
  2058. writel(max_rx_len, rx_maxlen_reg);
  2059. writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
  2060. }
  2061. static void gbe_sgmii_rtreset(struct gbe_priv *priv,
  2062. struct gbe_slave *slave, bool set)
  2063. {
  2064. if (SLAVE_LINK_IS_XGMII(slave))
  2065. return;
  2066. netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
  2067. slave->slave_num, set);
  2068. }
  2069. static void gbe_slave_stop(struct gbe_intf *intf)
  2070. {
  2071. struct gbe_priv *gbe_dev = intf->gbe_dev;
  2072. struct gbe_slave *slave = intf->slave;
  2073. if (!IS_SS_ID_2U(gbe_dev))
  2074. gbe_sgmii_rtreset(gbe_dev, slave, true);
  2075. gbe_port_reset(slave);
  2076. /* Disable forwarding */
  2077. cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
  2078. ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
  2079. cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
  2080. 1 << slave->port_num, 0, 0);
  2081. if (!slave->phy)
  2082. return;
  2083. phy_stop(slave->phy);
  2084. phy_disconnect(slave->phy);
  2085. slave->phy = NULL;
  2086. }
  2087. static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
  2088. {
  2089. if (SLAVE_LINK_IS_XGMII(slave))
  2090. return;
  2091. netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
  2092. netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
  2093. slave->link_interface);
  2094. }
  2095. static int gbe_slave_open(struct gbe_intf *gbe_intf)
  2096. {
  2097. struct gbe_priv *priv = gbe_intf->gbe_dev;
  2098. struct gbe_slave *slave = gbe_intf->slave;
  2099. phy_interface_t phy_mode;
  2100. bool has_phy = false;
  2101. void (*hndlr)(struct net_device *) = gbe_adjust_link;
  2102. if (!IS_SS_ID_2U(priv))
  2103. gbe_sgmii_config(priv, slave);
  2104. gbe_port_reset(slave);
  2105. if (!IS_SS_ID_2U(priv))
  2106. gbe_sgmii_rtreset(priv, slave, false);
  2107. gbe_port_config(priv, slave, priv->rx_packet_max);
  2108. gbe_set_slave_mac(slave, gbe_intf);
  2109. /* For NU & 2U switch, map the vlan priorities to zero
  2110. * as we only configure to use priority 0
  2111. */
  2112. if (IS_SS_ID_MU(priv))
  2113. writel(HOST_TX_PRI_MAP_DEFAULT,
  2114. GBE_REG_ADDR(slave, port_regs, rx_pri_map));
  2115. /* enable forwarding */
  2116. cpsw_ale_control_set(priv->ale, slave->port_num,
  2117. ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
  2118. cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
  2119. 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
  2120. if (slave->link_interface == SGMII_LINK_MAC_PHY) {
  2121. has_phy = true;
  2122. phy_mode = PHY_INTERFACE_MODE_SGMII;
  2123. slave->phy_port_t = PORT_MII;
  2124. } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
  2125. has_phy = true;
  2126. phy_mode = of_get_phy_mode(slave->node);
  2127. /* if phy-mode is not present, default to
  2128. * PHY_INTERFACE_MODE_RGMII
  2129. */
  2130. if (phy_mode < 0)
  2131. phy_mode = PHY_INTERFACE_MODE_RGMII;
  2132. if (!phy_interface_mode_is_rgmii(phy_mode)) {
  2133. dev_err(priv->dev,
  2134. "Unsupported phy mode %d\n", phy_mode);
  2135. return -EINVAL;
  2136. }
  2137. slave->phy_port_t = PORT_MII;
  2138. } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
  2139. has_phy = true;
  2140. phy_mode = PHY_INTERFACE_MODE_NA;
  2141. slave->phy_port_t = PORT_FIBRE;
  2142. }
  2143. if (has_phy) {
  2144. if (IS_SS_ID_XGBE(priv))
  2145. hndlr = xgbe_adjust_link;
  2146. slave->phy = of_phy_connect(gbe_intf->ndev,
  2147. slave->phy_node,
  2148. hndlr, 0,
  2149. phy_mode);
  2150. if (!slave->phy) {
  2151. dev_err(priv->dev, "phy not found on slave %d\n",
  2152. slave->slave_num);
  2153. return -ENODEV;
  2154. }
  2155. dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
  2156. phydev_name(slave->phy));
  2157. phy_start(slave->phy);
  2158. }
  2159. return 0;
  2160. }
  2161. static void gbe_init_host_port(struct gbe_priv *priv)
  2162. {
  2163. int bypass_en = 1;
  2164. /* Host Tx Pri */
  2165. if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
  2166. writel(HOST_TX_PRI_MAP_DEFAULT,
  2167. GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
  2168. /* Max length register */
  2169. writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
  2170. rx_maxlen));
  2171. cpsw_ale_start(priv->ale);
  2172. if (priv->enable_ale)
  2173. bypass_en = 0;
  2174. cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
  2175. cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
  2176. cpsw_ale_control_set(priv->ale, priv->host_port,
  2177. ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
  2178. cpsw_ale_control_set(priv->ale, 0,
  2179. ALE_PORT_UNKNOWN_VLAN_MEMBER,
  2180. GBE_PORT_MASK(priv->ale_ports));
  2181. cpsw_ale_control_set(priv->ale, 0,
  2182. ALE_PORT_UNKNOWN_MCAST_FLOOD,
  2183. GBE_PORT_MASK(priv->ale_ports - 1));
  2184. cpsw_ale_control_set(priv->ale, 0,
  2185. ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
  2186. GBE_PORT_MASK(priv->ale_ports));
  2187. cpsw_ale_control_set(priv->ale, 0,
  2188. ALE_PORT_UNTAGGED_EGRESS,
  2189. GBE_PORT_MASK(priv->ale_ports));
  2190. }
  2191. static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
  2192. {
  2193. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2194. u16 vlan_id;
  2195. cpsw_ale_add_mcast(gbe_dev->ale, addr,
  2196. GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
  2197. ALE_MCAST_FWD_2);
  2198. for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
  2199. cpsw_ale_add_mcast(gbe_dev->ale, addr,
  2200. GBE_PORT_MASK(gbe_dev->ale_ports),
  2201. ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
  2202. }
  2203. }
  2204. static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
  2205. {
  2206. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2207. u16 vlan_id;
  2208. cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
  2209. for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
  2210. cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
  2211. ALE_VLAN, vlan_id);
  2212. }
  2213. static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
  2214. {
  2215. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2216. u16 vlan_id;
  2217. cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
  2218. for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
  2219. cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
  2220. }
  2221. }
  2222. static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
  2223. {
  2224. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2225. u16 vlan_id;
  2226. cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
  2227. for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
  2228. cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
  2229. ALE_VLAN, vlan_id);
  2230. }
  2231. }
  2232. static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
  2233. {
  2234. struct gbe_intf *gbe_intf = intf_priv;
  2235. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2236. dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
  2237. naddr->addr, naddr->type);
  2238. switch (naddr->type) {
  2239. case ADDR_MCAST:
  2240. case ADDR_BCAST:
  2241. gbe_add_mcast_addr(gbe_intf, naddr->addr);
  2242. break;
  2243. case ADDR_UCAST:
  2244. case ADDR_DEV:
  2245. gbe_add_ucast_addr(gbe_intf, naddr->addr);
  2246. break;
  2247. case ADDR_ANY:
  2248. /* nothing to do for promiscuous */
  2249. default:
  2250. break;
  2251. }
  2252. return 0;
  2253. }
  2254. static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
  2255. {
  2256. struct gbe_intf *gbe_intf = intf_priv;
  2257. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2258. dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
  2259. naddr->addr, naddr->type);
  2260. switch (naddr->type) {
  2261. case ADDR_MCAST:
  2262. case ADDR_BCAST:
  2263. gbe_del_mcast_addr(gbe_intf, naddr->addr);
  2264. break;
  2265. case ADDR_UCAST:
  2266. case ADDR_DEV:
  2267. gbe_del_ucast_addr(gbe_intf, naddr->addr);
  2268. break;
  2269. case ADDR_ANY:
  2270. /* nothing to do for promiscuous */
  2271. default:
  2272. break;
  2273. }
  2274. return 0;
  2275. }
  2276. static int gbe_add_vid(void *intf_priv, int vid)
  2277. {
  2278. struct gbe_intf *gbe_intf = intf_priv;
  2279. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2280. set_bit(vid, gbe_intf->active_vlans);
  2281. cpsw_ale_add_vlan(gbe_dev->ale, vid,
  2282. GBE_PORT_MASK(gbe_dev->ale_ports),
  2283. GBE_MASK_NO_PORTS,
  2284. GBE_PORT_MASK(gbe_dev->ale_ports),
  2285. GBE_PORT_MASK(gbe_dev->ale_ports - 1));
  2286. return 0;
  2287. }
  2288. static int gbe_del_vid(void *intf_priv, int vid)
  2289. {
  2290. struct gbe_intf *gbe_intf = intf_priv;
  2291. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2292. cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
  2293. clear_bit(vid, gbe_intf->active_vlans);
  2294. return 0;
  2295. }
  2296. #if IS_ENABLED(CONFIG_TI_CPTS)
  2297. #define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
  2298. #define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
  2299. static void gbe_txtstamp(void *context, struct sk_buff *skb)
  2300. {
  2301. struct gbe_intf *gbe_intf = context;
  2302. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2303. cpts_tx_timestamp(gbe_dev->cpts, skb);
  2304. }
  2305. static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
  2306. const struct netcp_packet *p_info)
  2307. {
  2308. struct sk_buff *skb = p_info->skb;
  2309. return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
  2310. }
  2311. static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
  2312. struct netcp_packet *p_info)
  2313. {
  2314. struct phy_device *phydev = p_info->skb->dev->phydev;
  2315. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2316. if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
  2317. !cpts_is_tx_enabled(gbe_dev->cpts))
  2318. return 0;
  2319. /* If phy has the txtstamp api, assume it will do it.
  2320. * We mark it here because skb_tx_timestamp() is called
  2321. * after all the txhooks are called.
  2322. */
  2323. if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
  2324. skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
  2325. return 0;
  2326. }
  2327. if (gbe_need_txtstamp(gbe_intf, p_info)) {
  2328. p_info->txtstamp = gbe_txtstamp;
  2329. p_info->ts_context = (void *)gbe_intf;
  2330. skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
  2331. }
  2332. return 0;
  2333. }
  2334. static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
  2335. {
  2336. struct phy_device *phydev = p_info->skb->dev->phydev;
  2337. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2338. if (p_info->rxtstamp_complete)
  2339. return 0;
  2340. if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
  2341. p_info->rxtstamp_complete = true;
  2342. return 0;
  2343. }
  2344. cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
  2345. p_info->rxtstamp_complete = true;
  2346. return 0;
  2347. }
  2348. static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
  2349. {
  2350. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2351. struct cpts *cpts = gbe_dev->cpts;
  2352. struct hwtstamp_config cfg;
  2353. if (!cpts)
  2354. return -EOPNOTSUPP;
  2355. cfg.flags = 0;
  2356. cfg.tx_type = cpts_is_tx_enabled(cpts) ?
  2357. HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
  2358. cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
  2359. cpts->rx_enable : HWTSTAMP_FILTER_NONE);
  2360. return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
  2361. }
  2362. static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
  2363. {
  2364. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2365. struct gbe_slave *slave = gbe_intf->slave;
  2366. u32 ts_en, seq_id, ctl;
  2367. if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
  2368. !cpts_is_tx_enabled(gbe_dev->cpts)) {
  2369. writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
  2370. return;
  2371. }
  2372. seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
  2373. ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
  2374. ctl = ETH_P_1588 | TS_TTL_NONZERO |
  2375. (slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
  2376. (slave->ts_ctl.uni ? TS_UNI_EN :
  2377. slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
  2378. if (cpts_is_tx_enabled(gbe_dev->cpts))
  2379. ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
  2380. if (cpts_is_rx_enabled(gbe_dev->cpts))
  2381. ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
  2382. writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl));
  2383. writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
  2384. writel(ctl, GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
  2385. }
  2386. static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
  2387. {
  2388. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2389. struct cpts *cpts = gbe_dev->cpts;
  2390. struct hwtstamp_config cfg;
  2391. if (!cpts)
  2392. return -EOPNOTSUPP;
  2393. if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
  2394. return -EFAULT;
  2395. /* reserved for future extensions */
  2396. if (cfg.flags)
  2397. return -EINVAL;
  2398. switch (cfg.tx_type) {
  2399. case HWTSTAMP_TX_OFF:
  2400. cpts_tx_enable(cpts, 0);
  2401. break;
  2402. case HWTSTAMP_TX_ON:
  2403. cpts_tx_enable(cpts, 1);
  2404. break;
  2405. default:
  2406. return -ERANGE;
  2407. }
  2408. switch (cfg.rx_filter) {
  2409. case HWTSTAMP_FILTER_NONE:
  2410. cpts_rx_enable(cpts, 0);
  2411. break;
  2412. case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
  2413. case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
  2414. case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
  2415. cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
  2416. cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
  2417. break;
  2418. case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
  2419. case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
  2420. case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
  2421. case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
  2422. case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
  2423. case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
  2424. case HWTSTAMP_FILTER_PTP_V2_EVENT:
  2425. case HWTSTAMP_FILTER_PTP_V2_SYNC:
  2426. case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
  2427. cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
  2428. cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
  2429. break;
  2430. default:
  2431. return -ERANGE;
  2432. }
  2433. gbe_hwtstamp(gbe_intf);
  2434. return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
  2435. }
  2436. static void gbe_register_cpts(struct gbe_priv *gbe_dev)
  2437. {
  2438. if (!gbe_dev->cpts)
  2439. return;
  2440. if (gbe_dev->cpts_registered > 0)
  2441. goto done;
  2442. if (cpts_register(gbe_dev->cpts)) {
  2443. dev_err(gbe_dev->dev, "error registering cpts device\n");
  2444. return;
  2445. }
  2446. done:
  2447. ++gbe_dev->cpts_registered;
  2448. }
  2449. static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
  2450. {
  2451. if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
  2452. return;
  2453. if (--gbe_dev->cpts_registered)
  2454. return;
  2455. cpts_unregister(gbe_dev->cpts);
  2456. }
  2457. #else
  2458. static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
  2459. struct netcp_packet *p_info)
  2460. {
  2461. return 0;
  2462. }
  2463. static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
  2464. struct netcp_packet *p_info)
  2465. {
  2466. return 0;
  2467. }
  2468. static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
  2469. struct ifreq *ifr, int cmd)
  2470. {
  2471. return -EOPNOTSUPP;
  2472. }
  2473. static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
  2474. {
  2475. }
  2476. static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
  2477. {
  2478. }
  2479. static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
  2480. {
  2481. return -EOPNOTSUPP;
  2482. }
  2483. static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
  2484. {
  2485. return -EOPNOTSUPP;
  2486. }
  2487. #endif /* CONFIG_TI_CPTS */
  2488. static int gbe_set_rx_mode(void *intf_priv, bool promisc)
  2489. {
  2490. struct gbe_intf *gbe_intf = intf_priv;
  2491. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2492. struct cpsw_ale *ale = gbe_dev->ale;
  2493. unsigned long timeout;
  2494. int i, ret = -ETIMEDOUT;
  2495. /* Disable(1)/Enable(0) Learn for all ports (host is port 0 and
  2496. * slaves are port 1 and up
  2497. */
  2498. for (i = 0; i <= gbe_dev->num_slaves; i++) {
  2499. cpsw_ale_control_set(ale, i,
  2500. ALE_PORT_NOLEARN, !!promisc);
  2501. cpsw_ale_control_set(ale, i,
  2502. ALE_PORT_NO_SA_UPDATE, !!promisc);
  2503. }
  2504. if (!promisc) {
  2505. /* Don't Flood All Unicast Packets to Host port */
  2506. cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
  2507. dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
  2508. return 0;
  2509. }
  2510. timeout = jiffies + HZ;
  2511. /* Clear All Untouched entries */
  2512. cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
  2513. do {
  2514. cpu_relax();
  2515. if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
  2516. ret = 0;
  2517. break;
  2518. }
  2519. } while (time_after(timeout, jiffies));
  2520. /* Make sure it is not a false timeout */
  2521. if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
  2522. return ret;
  2523. cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
  2524. /* Clear all mcast from ALE */
  2525. cpsw_ale_flush_multicast(ale,
  2526. GBE_PORT_MASK(gbe_dev->ale_ports),
  2527. -1);
  2528. /* Flood All Unicast Packets to Host port */
  2529. cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
  2530. dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
  2531. return ret;
  2532. }
  2533. static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
  2534. {
  2535. struct gbe_intf *gbe_intf = intf_priv;
  2536. struct phy_device *phy = gbe_intf->slave->phy;
  2537. if (!phy || !phy->drv->hwtstamp) {
  2538. switch (cmd) {
  2539. case SIOCGHWTSTAMP:
  2540. return gbe_hwtstamp_get(gbe_intf, req);
  2541. case SIOCSHWTSTAMP:
  2542. return gbe_hwtstamp_set(gbe_intf, req);
  2543. }
  2544. }
  2545. if (phy)
  2546. return phy_mii_ioctl(phy, req, cmd);
  2547. return -EOPNOTSUPP;
  2548. }
  2549. static void netcp_ethss_timer(struct timer_list *t)
  2550. {
  2551. struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
  2552. struct gbe_intf *gbe_intf;
  2553. struct gbe_slave *slave;
  2554. /* Check & update SGMII link state of interfaces */
  2555. for_each_intf(gbe_intf, gbe_dev) {
  2556. if (!gbe_intf->slave->open)
  2557. continue;
  2558. netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
  2559. gbe_intf->ndev);
  2560. }
  2561. /* Check & update SGMII link state of secondary ports */
  2562. for_each_sec_slave(slave, gbe_dev) {
  2563. netcp_ethss_update_link_state(gbe_dev, slave, NULL);
  2564. }
  2565. /* A timer runs as a BH, no need to block them */
  2566. spin_lock(&gbe_dev->hw_stats_lock);
  2567. if (IS_SS_ID_VER_14(gbe_dev))
  2568. gbe_update_stats_ver14(gbe_dev, NULL);
  2569. else
  2570. gbe_update_stats(gbe_dev, NULL);
  2571. spin_unlock(&gbe_dev->hw_stats_lock);
  2572. gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
  2573. add_timer(&gbe_dev->timer);
  2574. }
  2575. static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
  2576. {
  2577. struct gbe_intf *gbe_intf = data;
  2578. p_info->tx_pipe = &gbe_intf->tx_pipe;
  2579. return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
  2580. }
  2581. static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
  2582. {
  2583. struct gbe_intf *gbe_intf = data;
  2584. return gbe_rxtstamp(gbe_intf, p_info);
  2585. }
  2586. static int gbe_open(void *intf_priv, struct net_device *ndev)
  2587. {
  2588. struct gbe_intf *gbe_intf = intf_priv;
  2589. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2590. struct netcp_intf *netcp = netdev_priv(ndev);
  2591. struct gbe_slave *slave = gbe_intf->slave;
  2592. int port_num = slave->port_num;
  2593. u32 reg, val;
  2594. int ret;
  2595. reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
  2596. dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
  2597. GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
  2598. GBE_RTL_VERSION(reg), GBE_IDENT(reg));
  2599. /* For 10G and on NetCP 1.5, use directed to port */
  2600. if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
  2601. gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
  2602. if (gbe_dev->enable_ale)
  2603. gbe_intf->tx_pipe.switch_to_port = 0;
  2604. else
  2605. gbe_intf->tx_pipe.switch_to_port = port_num;
  2606. dev_dbg(gbe_dev->dev,
  2607. "opened TX channel %s: %p with to port %d, flags %d\n",
  2608. gbe_intf->tx_pipe.dma_chan_name,
  2609. gbe_intf->tx_pipe.dma_channel,
  2610. gbe_intf->tx_pipe.switch_to_port,
  2611. gbe_intf->tx_pipe.flags);
  2612. gbe_slave_stop(gbe_intf);
  2613. /* disable priority elevation and enable statistics on all ports */
  2614. writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
  2615. /* Control register */
  2616. val = GBE_CTL_P0_ENABLE;
  2617. if (IS_SS_ID_MU(gbe_dev)) {
  2618. val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
  2619. netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
  2620. }
  2621. writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
  2622. /* All statistics enabled and STAT AB visible by default */
  2623. writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
  2624. stat_port_en));
  2625. ret = gbe_slave_open(gbe_intf);
  2626. if (ret)
  2627. goto fail;
  2628. netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
  2629. netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
  2630. slave->open = true;
  2631. netcp_ethss_update_link_state(gbe_dev, slave, ndev);
  2632. gbe_register_cpts(gbe_dev);
  2633. return 0;
  2634. fail:
  2635. gbe_slave_stop(gbe_intf);
  2636. return ret;
  2637. }
static int gbe_close(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	gbe_unregister_cpts(gbe_dev);

	gbe_slave_stop(gbe_intf);

	netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);

	gbe_intf->slave->open = false;
	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);

	return 0;
}
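
/* Default CPTS timestamping control fields for a slave port; an empty stub
 * is used when CONFIG_TI_CPTS is disabled.
 */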
#if IS_ENABLED(CONFIG_TI_CPTS)
static void init_slave_ts_ctl(struct gbe_slave *slave)
{
	slave->ts_ctl.uni = 1;
	slave->ts_ctl.dst_port_map =
		(TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
	slave->ts_ctl.maddr_map =
		(TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
}

#else
static void init_slave_ts_ctl(struct gbe_slave *slave)
{
}
#endif /* CONFIG_TI_CPTS */
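
/* init_slave() - parse one slave port DT node ("slave-port", "link-interface",
 * "phy-handle"), pick the 1G or 10G MAC control default and compute the
 * per-port and per-EMAC register blocks for the detected subsystem version
 * (GBE 1.4, GBENU or XGBE).
 */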
static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
		      struct device_node *node)
{
	int port_reg_num;
	u32 port_reg_ofs, emac_reg_ofs;
	u32 port_reg_blk_sz, emac_reg_blk_sz;

	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
		return -EINVAL;
	}

	if (of_property_read_u32(node, "link-interface",
				 &slave->link_interface)) {
		dev_warn(gbe_dev->dev,
			 "missing link-interface value defaulting to 1G mac-phy link\n");
		slave->link_interface = SGMII_LINK_MAC_PHY;
	}

	slave->node = node;
	slave->open = false;
	if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
	    (slave->link_interface == RGMII_LINK_MAC_PHY) ||
	    (slave->link_interface == XGMII_LINK_MAC_PHY))
		slave->phy_node = of_parse_phandle(node, "phy-handle", 0);

	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);

	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
	else
		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;

	/* Emac regs memmap are contiguous but port regs are not */
	port_reg_num = slave->slave_num;
	if (IS_SS_ID_VER_14(gbe_dev)) {
		if (slave->slave_num > 1) {
			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
			port_reg_num -= 2;
		} else {
			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
		}
		emac_reg_ofs = GBE13_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else if (IS_SS_ID_MU(gbe_dev)) {
		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
		emac_reg_ofs = GBENU_EMAC_OFFSET;
		port_reg_blk_sz = 0x1000;
		emac_reg_blk_sz = 0x1000;
	} else if (IS_SS_ID_XGBE(gbe_dev)) {
		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
		emac_reg_ofs = XGBE10_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else {
		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
			gbe_dev->ss_version);
		return -EINVAL;
	}

	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
			   (port_reg_blk_sz * port_reg_num);
	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
			   (emac_reg_blk_sz * slave->slave_num);

	if (IS_SS_ID_VER_14(gbe_dev)) {
		/* Initialize slave port register offsets */
		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);

	} else if (IS_SS_ID_MU(gbe_dev)) {
		/* Initialize slave port register offsets */
		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);

		/* Initialize EMAC register offsets */
		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);

	} else if (IS_SS_ID_XGBE(gbe_dev)) {
		/* Initialize slave port register offsets */
		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
	}

	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);

	init_slave_ts_ctl(slave);
	return 0;
}
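
/* Secondary slave ports are switch ports that carry traffic without a network
 * interface of their own. They are initialized and opened here, and MAC-PHY
 * links are attached to their PHYs through a single dummy netdev so that
 * phylib can drive them.
 */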
static void init_secondary_ports(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct device *dev = gbe_dev->dev;
	phy_interface_t phy_mode;
	struct gbe_priv **priv;
	struct device_node *port;
	struct gbe_slave *slave;
	bool mac_phy_link = false;

	for_each_child_of_node(node, port) {
		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n",
				port->name);
			continue;
		}

		if (init_slave(gbe_dev, slave, port)) {
			dev_err(dev,
				"Failed to initialize secondary port(%s), skipping...\n",
				port->name);
			devm_kfree(dev, slave);
			continue;
		}

		if (!IS_SS_ID_2U(gbe_dev))
			gbe_sgmii_config(gbe_dev, slave);
		gbe_port_reset(slave);
		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
		gbe_dev->num_slaves++;
		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
		    (slave->link_interface == XGMII_LINK_MAC_PHY))
			mac_phy_link = true;

		slave->open = true;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
			of_node_put(port);
			break;
		}
	}

	/* of_phy_connect() is needed only for MAC-PHY interface */
	if (!mac_phy_link)
		return;

	/* Allocate dummy netdev device for attaching to phy device */
	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
					   NET_NAME_UNKNOWN, ether_setup);
	if (!gbe_dev->dummy_ndev) {
		dev_err(dev,
			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
		return;
	}
	priv = netdev_priv(gbe_dev->dummy_ndev);
	*priv = gbe_dev;

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_RGMII;
		slave->phy_port_t = PORT_MII;
	} else {
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	for_each_sec_slave(slave, gbe_dev) {
		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != XGMII_LINK_MAC_PHY))
			continue;
		slave->phy =
			of_phy_connect(gbe_dev->dummy_ndev,
				       slave->phy_node,
				       gbe_adjust_link_sec_slaves,
				       0, phy_mode);
		if (!slave->phy) {
			dev_err(dev, "phy not found for slave %d\n",
				slave->slave_num);
		} else {
			dev_dbg(dev, "phy found: id is: %s\n",
				phydev_name(slave->phy));
			phy_start(slave->phy);
		}
	}
}
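
/* Undo init_secondary_ports(): disconnect any attached PHYs, drop the slaves
 * from the list and free the dummy netdev.
 */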
static void free_secondary_ports(struct gbe_priv *gbe_dev)
{
	struct gbe_slave *slave;

	while (!list_empty(&gbe_dev->secondary_slaves)) {
		slave = first_sec_slave(gbe_dev);

		if (slave->phy)
			phy_disconnect(slave->phy);
		list_del(&slave->slave_list);
	}
	if (gbe_dev->dummy_ndev)
		free_netdev(gbe_dev->dummy_ndev);
}
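
/* Map the XGBE subsystem, switch module and SerDes register regions from the
 * DT node and fill in the XGBE-specific stats tables, offsets and register
 * maps in gbe_priv.
 */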
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%s) ss address at %d\n",
			node->name, XGBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%s) sm address at %d\n",
			node->name, XGBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe serdes of node(%s) address at %d\n",
			node->name, XGBE_SERDES_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);

	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats, sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->hw_stats_prev =
		devm_kcalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats, sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
				   XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	for (i = 0; i < gbe_dev->max_num_ports; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
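
/* Map the GBE subsystem register window and latch the subsystem version
 * register; gbe_probe() uses the version to choose between the 1.4 and
 * NU register layouts.
 */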
static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
				    struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of node(%s) of gbe ss address at %d\n",
			node->name, GBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;
	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
	return 0;
}
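
/* Resource setup for the version 1.4 GBE subsystem (the layout with the
 * separate SGMII port 3/4 window): map the register regions, allocate the
 * statistics buffers and fill in the GBE13 offsets and register maps.
 */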
static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%s) address at index %d\n",
			node->name, GBE_SGMII34_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe sgmii port34 register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->sgmii_port34_regs = regs;

	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%s) address at index %d\n",
			node->name, GBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);

	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats, sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->hw_stats_prev =
		devm_kcalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats, sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;

	/* K2HK has only 2 hw stats modules visible at a time, so
	 * modules 0 & 2 point to one base and
	 * modules 1 & 3 point to the other base
	 */
	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
		gbe_dev->hw_stats_regs[i] =
			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
	}

	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
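
/* Resource setup for the NU/2U GBE subsystem: stats sizing depends on the
 * number of slave ports (NU) or a single port (2U); map the switch module
 * and fill in the GBENU offsets and register maps.
 */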
static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
	gbe_dev->et_stats = gbenu_et_stats;

	if (IS_SS_ID_MU(gbe_dev))
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
	else
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
					GBENU_ET_STATS_PORT_SIZE;

	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats, sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->hw_stats_prev =
		devm_kcalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats, sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbenu node(%s) addr at index %d\n",
			node->name, GBENU_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbenu switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	if (!IS_SS_ID_2U(gbe_dev))
		gbe_dev->sgmii_port_regs =
			gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;

	/* Although sgmii modules are mem mapped to one contiguous
	 * region on GBENU devices, setting sgmii_port34_regs allows
	 * consistent code when accessing sgmii api
	 */
	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
				     (2 * GBENU_SGMII_MODULE_SIZE);

	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;

	for (i = 0; i < (gbe_dev->max_num_ports); i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	/* ok to set for MU, but used by 2U only */
	GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);

	/* Switch module registers */
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);

	/* Host port registers */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);

	/* For NU only. 2U does not need tx_pri_map.
	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
	 * while 2U has only 1 such thread
	 */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	return 0;
}
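
/* gbe_probe() - instance probe for a "gbe" or "xgbe" DT node: size the device
 * from the compatible string, map registers for the detected subsystem,
 * set up the TX pipe, count interface child nodes, bring up secondary slave
 * ports, create the ALE and CPTS instances, initialize the host port and
 * start the periodic statistics/link timer.
 */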
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	struct device_node *interfaces, *interface;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int i, ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
	    of_device_is_compatible(node, "ti,netcp-gbe")) {
		gbe_dev->max_num_slaves = 4;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
		gbe_dev->max_num_slaves = 8;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
		gbe_dev->max_num_slaves = 1;
		gbe_module.set_rx_mode = gbe_set_rx_mode;
	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
		gbe_dev->max_num_slaves = 2;
	} else {
		dev_err(dev, "device tree node for unknown device\n");
		return -EINVAL;
	}
	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	if (of_find_property(node, "enable-ale", NULL)) {
		gbe_dev->enable_ale = true;
		dev_info(dev, "ALE enabled\n");
	} else {
		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled\n");
	}

	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing tx-queue parameter\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		return -EINVAL;
	}

	if (!strcmp(node->name, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			return ret;

		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);

		if (IS_SS_ID_VER_14(gbe_dev))
			ret = set_gbe_ethss14_priv(gbe_dev, node);
		else if (IS_SS_ID_MU(gbe_dev))
			ret = set_gbenu_ethss_priv(gbe_dev, node);
		else
			ret = -ENODEV;

	} else if (!strcmp(node->name, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			return ret;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
	} else {
		dev_err(dev, "unknown GBE node(%s)\n", node->name);
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret) {
		of_node_put(interfaces);
		return ret;
	}

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret) {
		of_node_put(interfaces);
		return ret;
	}

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port",
					   &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
				interface->name);
			continue;
		}
		gbe_dev->num_slaves++;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
			of_node_put(interface);
			break;
		}
	}
	of_node_put(interfaces);

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	if (!gbe_dev->num_slaves) {
		dev_err(dev,
			"No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto free_sec_ports;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev = gbe_dev->dev;
	ale_params.ale_regs = gbe_dev->ale_reg;
	ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_entries = gbe_dev->ale_entries;
	ale_params.ale_ports = gbe_dev->ale_ports;
	if (IS_SS_ID_MU(gbe_dev)) {
		ale_params.major_ver_mask = 0x7;
		ale_params.nu_switch_ale = true;
	}
	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (!gbe_dev->ale) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto free_sec_ports;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}

	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
		ret = PTR_ERR(gbe_dev->cpts);
		goto free_sec_ports;
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	spin_lock_bh(&gbe_dev->hw_stats_lock);
	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
		if (IS_SS_ID_VER_14(gbe_dev))
			gbe_reset_mod_stats_ver14(gbe_dev, i);
		else
			gbe_reset_mod_stats(gbe_dev, i);
	}
	spin_unlock_bh(&gbe_dev->hw_stats_lock);

	timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

free_sec_ports:
	free_secondary_ports(gbe_dev);
	return ret;
}
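
/* gbe_attach() - bind a network interface to the slave port described by its
 * interface DT node and hook up the keystone ethtool ops.
 */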
static int gbe_attach(void *inst_priv, struct net_device *ndev,
		      struct device_node *node, void **intf_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!node) {
		dev_err(gbe_dev->dev, "interface node not available\n");
		return -ENODEV;
	}

	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
	if (!gbe_intf)
		return -ENOMEM;

	gbe_intf->ndev = ndev;
	gbe_intf->dev = gbe_dev->dev;
	gbe_intf->gbe_dev = gbe_dev;

	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
				       sizeof(*gbe_intf->slave),
				       GFP_KERNEL);
	if (!gbe_intf->slave) {
		ret = -ENOMEM;
		goto fail;
	}

	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
		ret = -ENODEV;
		goto fail;
	}

	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
	ndev->ethtool_ops = &keystone_ethtool_ops;
	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
	*intf_priv = gbe_intf;
	return 0;

fail:
	if (gbe_intf->slave)
		devm_kfree(gbe_dev->dev, gbe_intf->slave);
	if (gbe_intf)
		devm_kfree(gbe_dev->dev, gbe_intf);
	return ret;
}
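
/* gbe_release() - undo gbe_attach() for one interface. */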
static int gbe_release(void *intf_priv)
{
	struct gbe_intf *gbe_intf = intf_priv;

	gbe_intf->ndev->ethtool_ops = NULL;
	list_del(&gbe_intf->gbe_intf_list);
	devm_kfree(gbe_intf->dev, gbe_intf->slave);
	devm_kfree(gbe_intf->dev, gbe_intf);
	return 0;
}
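
/* gbe_remove() - instance teardown: stop the stats timer, release CPTS and
 * the ALE, close the TX pipe and free the secondary ports.
 */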
static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;

	del_timer_sync(&gbe_dev->timer);
	cpts_release(gbe_dev->cpts);
	cpsw_ale_stop(gbe_dev->ale);
	netcp_txpipe_close(&gbe_dev->tx_pipe);
	free_secondary_ports(gbe_dev);

	if (!list_empty(&gbe_dev->gbe_intf_head))
		dev_alert(gbe_dev->dev,
			  "unreleased ethss interfaces present\n");

	return 0;
}
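
/* NETCP module descriptors for the 1G and 10G subsystems; both share the same
 * callbacks and differ only by name.
 */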
static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static int __init keystone_gbe_init(void)
{
	int ret;

	ret = netcp_register_module(&gbe_module);
	if (ret)
		return ret;

	ret = netcp_register_module(&xgbe_module);
	if (ret) {
		/* don't leave the gbe module registered if xgbe fails */
		netcp_unregister_module(&gbe_module);
		return ret;
	}

	return 0;
}
module_init(keystone_gbe_init);

static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");