/**
 * drivers/net/ethernet/micrel/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
 *
 * Copyright (c) 2009-2010 Micrel, Inc.
 * Tristram Ha <Tristram.Ha@micrel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/slab.h>
/* DMA Registers */
#define KS_DMA_TX_CTRL 0x0000
#define DMA_TX_ENABLE 0x00000001
#define DMA_TX_CRC_ENABLE 0x00000002
#define DMA_TX_PAD_ENABLE 0x00000004
#define DMA_TX_LOOPBACK 0x00000100
#define DMA_TX_FLOW_ENABLE 0x00000200
#define DMA_TX_CSUM_IP 0x00010000
#define DMA_TX_CSUM_TCP 0x00020000
#define DMA_TX_CSUM_UDP 0x00040000
#define DMA_TX_BURST_SIZE 0x3F000000
#define KS_DMA_RX_CTRL 0x0004
#define DMA_RX_ENABLE 0x00000001
#define KS884X_DMA_RX_MULTICAST 0x00000002
#define DMA_RX_PROMISCUOUS 0x00000004
#define DMA_RX_ERROR 0x00000008
#define DMA_RX_UNICAST 0x00000010
#define DMA_RX_ALL_MULTICAST 0x00000020
#define DMA_RX_BROADCAST 0x00000040
#define DMA_RX_FLOW_ENABLE 0x00000200
#define DMA_RX_CSUM_IP 0x00010000
#define DMA_RX_CSUM_TCP 0x00020000
#define DMA_RX_CSUM_UDP 0x00040000
#define DMA_RX_BURST_SIZE 0x3F000000
#define DMA_BURST_SHIFT 24
#define DMA_BURST_DEFAULT 8
#define KS_DMA_TX_START 0x0008
#define KS_DMA_RX_START 0x000C
#define DMA_START 0x00000001
#define KS_DMA_TX_ADDR 0x0010
#define KS_DMA_RX_ADDR 0x0014
#define DMA_ADDR_LIST_MASK 0xFFFFFFFC
#define DMA_ADDR_LIST_SHIFT 2
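
/*
 * Illustrative sketch, not part of the original driver: one way the DMA
 * registers above might be programmed to bring up the receive engine.
 * "io" stands for an assumed ioremap()'d register base and "ring_phys"
 * for the physical address of the first RX descriptor; both names are
 * hypothetical.
 */
static inline void ksz_example_start_rx_dma(void __iomem *io, u32 ring_phys)
{
        u32 ctrl;

        /* Point the hardware at the RX descriptor ring. */
        writel(ring_phys & DMA_ADDR_LIST_MASK, io + KS_DMA_RX_ADDR);

        /* Enable reception with the default burst size. */
        ctrl = DMA_RX_ENABLE | (DMA_BURST_DEFAULT << DMA_BURST_SHIFT);
        writel(ctrl, io + KS_DMA_RX_CTRL);

        /* Kick the RX DMA engine. */
        writel(DMA_START, io + KS_DMA_RX_START);
}
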
/* MTR0 */
#define KS884X_MULTICAST_0_OFFSET 0x0020
#define KS884X_MULTICAST_1_OFFSET 0x0021
#define KS884X_MULTICAST_2_OFFSET 0x0022
#define KS884x_MULTICAST_3_OFFSET 0x0023
/* MTR1 */
#define KS884X_MULTICAST_4_OFFSET 0x0024
#define KS884X_MULTICAST_5_OFFSET 0x0025
#define KS884X_MULTICAST_6_OFFSET 0x0026
#define KS884X_MULTICAST_7_OFFSET 0x0027
/* Interrupt Registers */
/* INTEN */
#define KS884X_INTERRUPTS_ENABLE 0x0028
/* INTST */
#define KS884X_INTERRUPTS_STATUS 0x002C
#define KS884X_INT_RX_STOPPED 0x02000000
#define KS884X_INT_TX_STOPPED 0x04000000
#define KS884X_INT_RX_OVERRUN 0x08000000
#define KS884X_INT_TX_EMPTY 0x10000000
#define KS884X_INT_RX 0x20000000
#define KS884X_INT_TX 0x40000000
#define KS884X_INT_PHY 0x80000000
#define KS884X_INT_RX_MASK \
        (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
#define KS884X_INT_TX_MASK \
        (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
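
/*
 * Illustrative sketch, not part of the original driver: unmasking the
 * interrupt sources above and acknowledging whatever is pending.  The
 * write-back of the status bits as the acknowledge step is an assumption
 * about the hardware; "io" is an assumed register base.
 */
static inline u32 ksz_example_handle_intr(void __iomem *io)
{
        u32 status;

        /* Unmask receive, transmit and PHY interrupts. */
        writel(KS884X_INT_MASK, io + KS884X_INTERRUPTS_ENABLE);

        /* Read the pending sources... */
        status = readl(io + KS884X_INTERRUPTS_STATUS);

        /* ...and acknowledge them (assumed write-one-to-clear). */
        if (status)
                writel(status, io + KS884X_INTERRUPTS_STATUS);

        return status;
}
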
/* MAC Additional Station Address */
/* MAAL0 */
#define KS_ADD_ADDR_0_LO 0x0080
/* MAAH0 */
#define KS_ADD_ADDR_0_HI 0x0084
/* MAAL1 */
#define KS_ADD_ADDR_1_LO 0x0088
/* MAAH1 */
#define KS_ADD_ADDR_1_HI 0x008C
/* MAAL2 */
#define KS_ADD_ADDR_2_LO 0x0090
/* MAAH2 */
#define KS_ADD_ADDR_2_HI 0x0094
/* MAAL3 */
#define KS_ADD_ADDR_3_LO 0x0098
/* MAAH3 */
#define KS_ADD_ADDR_3_HI 0x009C
/* MAAL4 */
#define KS_ADD_ADDR_4_LO 0x00A0
/* MAAH4 */
#define KS_ADD_ADDR_4_HI 0x00A4
/* MAAL5 */
#define KS_ADD_ADDR_5_LO 0x00A8
/* MAAH5 */
#define KS_ADD_ADDR_5_HI 0x00AC
/* MAAL6 */
#define KS_ADD_ADDR_6_LO 0x00B0
/* MAAH6 */
#define KS_ADD_ADDR_6_HI 0x00B4
/* MAAL7 */
#define KS_ADD_ADDR_7_LO 0x00B8
/* MAAH7 */
#define KS_ADD_ADDR_7_HI 0x00BC
/* MAAL8 */
#define KS_ADD_ADDR_8_LO 0x00C0
/* MAAH8 */
#define KS_ADD_ADDR_8_HI 0x00C4
/* MAAL9 */
#define KS_ADD_ADDR_9_LO 0x00C8
/* MAAH9 */
#define KS_ADD_ADDR_9_HI 0x00CC
/* MAAL10 */
#define KS_ADD_ADDR_A_LO 0x00D0
/* MAAH10 */
#define KS_ADD_ADDR_A_HI 0x00D4
/* MAAL11 */
#define KS_ADD_ADDR_B_LO 0x00D8
/* MAAH11 */
#define KS_ADD_ADDR_B_HI 0x00DC
/* MAAL12 */
#define KS_ADD_ADDR_C_LO 0x00E0
/* MAAH12 */
#define KS_ADD_ADDR_C_HI 0x00E4
/* MAAL13 */
#define KS_ADD_ADDR_D_LO 0x00E8
/* MAAH13 */
#define KS_ADD_ADDR_D_HI 0x00EC
/* MAAL14 */
#define KS_ADD_ADDR_E_LO 0x00F0
/* MAAH14 */
#define KS_ADD_ADDR_E_HI 0x00F4
/* MAAL15 */
#define KS_ADD_ADDR_F_LO 0x00F8
/* MAAH15 */
#define KS_ADD_ADDR_F_HI 0x00FC
#define ADD_ADDR_HI_MASK 0x0000FFFF
#define ADD_ADDR_ENABLE 0x80000000
#define ADD_ADDR_INCR 8
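
/*
 * Illustrative sketch, not part of the original driver: programming one of
 * the 16 additional station address slots above.  The slots are taken to be
 * ADD_ADDR_INCR bytes apart starting at KS_ADD_ADDR_0_LO, and the byte
 * ordering of the address words is an assumption; "io", "index" and "addr"
 * are hypothetical names.
 */
static inline void ksz_example_set_add_addr(void __iomem *io, int index,
                                            const u8 *addr)
{
        u32 lo = ((u32)addr[2] << 24) | ((u32)addr[3] << 16) |
                 ((u32)addr[4] << 8) | addr[5];
        u32 hi = ((u32)addr[0] << 8) | addr[1];

        writel(lo, io + KS_ADD_ADDR_0_LO + index * ADD_ADDR_INCR);
        /* ADD_ADDR_ENABLE makes the slot take part in address filtering. */
        writel((hi & ADD_ADDR_HI_MASK) | ADD_ADDR_ENABLE,
               io + KS_ADD_ADDR_0_HI + index * ADD_ADDR_INCR);
}
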
/* Miscellaneous Registers */
/* MARL */
#define KS884X_ADDR_0_OFFSET 0x0200
#define KS884X_ADDR_1_OFFSET 0x0201
/* MARM */
#define KS884X_ADDR_2_OFFSET 0x0202
#define KS884X_ADDR_3_OFFSET 0x0203
/* MARH */
#define KS884X_ADDR_4_OFFSET 0x0204
#define KS884X_ADDR_5_OFFSET 0x0205
/* OBCR */
#define KS884X_BUS_CTRL_OFFSET 0x0210
#define BUS_SPEED_125_MHZ 0x0000
#define BUS_SPEED_62_5_MHZ 0x0001
#define BUS_SPEED_41_66_MHZ 0x0002
#define BUS_SPEED_25_MHZ 0x0003
/* EEPCR */
#define KS884X_EEPROM_CTRL_OFFSET 0x0212
#define EEPROM_CHIP_SELECT 0x0001
#define EEPROM_SERIAL_CLOCK 0x0002
#define EEPROM_DATA_OUT 0x0004
#define EEPROM_DATA_IN 0x0008
#define EEPROM_ACCESS_ENABLE 0x0010
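
/*
 * Illustrative sketch, not part of the original driver: the EEPROM hangs
 * off the bit-banged interface in EEPCR, so access boils down to toggling
 * the individual bits above.  The register is assumed to be 16 bits wide;
 * "io", "bit" and "set" are hypothetical names.  For example, raising the
 * serial clock would be ksz_example_eeprom_bit(io, EEPROM_SERIAL_CLOCK, 1).
 */
static inline void ksz_example_eeprom_bit(void __iomem *io, u16 bit, int set)
{
        u16 data = readw(io + KS884X_EEPROM_CTRL_OFFSET);

        if (set)
                data |= bit;
        else
                data &= ~bit;
        writew(data, io + KS884X_EEPROM_CTRL_OFFSET);
}
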
/* MBIR */
#define KS884X_MEM_INFO_OFFSET 0x0214
#define RX_MEM_TEST_FAILED 0x0008
#define RX_MEM_TEST_FINISHED 0x0010
#define TX_MEM_TEST_FAILED 0x0800
#define TX_MEM_TEST_FINISHED 0x1000
/* GCR */
#define KS884X_GLOBAL_CTRL_OFFSET 0x0216
#define GLOBAL_SOFTWARE_RESET 0x0001
#define KS8841_POWER_MANAGE_OFFSET 0x0218
/* WFCR */
#define KS8841_WOL_CTRL_OFFSET 0x021A
#define KS8841_WOL_MAGIC_ENABLE 0x0080
#define KS8841_WOL_FRAME3_ENABLE 0x0008
#define KS8841_WOL_FRAME2_ENABLE 0x0004
#define KS8841_WOL_FRAME1_ENABLE 0x0002
#define KS8841_WOL_FRAME0_ENABLE 0x0001
/* WF0 */
#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
/* IACR */
#define KS884X_IACR_P 0x04A0
#define KS884X_IACR_OFFSET KS884X_IACR_P
/* IADR1 */
#define KS884X_IADR1_P 0x04A2
#define KS884X_IADR2_P 0x04A4
#define KS884X_IADR3_P 0x04A6
#define KS884X_IADR4_P 0x04A8
#define KS884X_IADR5_P 0x04AA
#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
/* P1MBCR */
#define KS884X_P1MBCR_P 0x04D0
#define KS884X_P1MBSR_P 0x04D2
#define KS884X_PHY1ILR_P 0x04D4
#define KS884X_PHY1IHR_P 0x04D6
#define KS884X_P1ANAR_P 0x04D8
#define KS884X_P1ANLPR_P 0x04DA
/* P2MBCR */
#define KS884X_P2MBCR_P 0x04E0
#define KS884X_P2MBSR_P 0x04E2
#define KS884X_PHY2ILR_P 0x04E4
#define KS884X_PHY2IHR_P 0x04E6
#define KS884X_P2ANAR_P 0x04E8
#define KS884X_P2ANLPR_P 0x04EA
#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
#define KS884X_PHY_CTRL_OFFSET 0x00
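
/*
 * Illustrative sketch, not part of the original driver: the two PHYs are
 * mapped as identical register blocks PHY_CTRL_INTERVAL bytes apart, so a
 * per-port PHY register is reached with simple address arithmetic.  "io",
 * "port" (0-based) and "reg" are hypothetical names; reading PHY 1's
 * control register would be ksz_example_r_phy(io, 0, KS884X_PHY_CTRL_OFFSET).
 */
static inline u16 ksz_example_r_phy(void __iomem *io, int port, int reg)
{
        return readw(io + KS884X_PHY_1_CTRL_OFFSET +
                     port * PHY_CTRL_INTERVAL + reg);
}
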
/* Mode Control Register */
#define PHY_REG_CTRL 0
#define PHY_RESET 0x8000
#define PHY_LOOPBACK 0x4000
#define PHY_SPEED_100MBIT 0x2000
#define PHY_AUTO_NEG_ENABLE 0x1000
#define PHY_POWER_DOWN 0x0800
#define PHY_MII_DISABLE 0x0400
#define PHY_AUTO_NEG_RESTART 0x0200
#define PHY_FULL_DUPLEX 0x0100
#define PHY_COLLISION_TEST 0x0080
#define PHY_HP_MDIX 0x0020
#define PHY_FORCE_MDIX 0x0010
#define PHY_AUTO_MDIX_DISABLE 0x0008
#define PHY_REMOTE_FAULT_DISABLE 0x0004
#define PHY_TRANSMIT_DISABLE 0x0002
#define PHY_LED_DISABLE 0x0001
#define KS884X_PHY_STATUS_OFFSET 0x02
/* Mode Status Register */
#define PHY_REG_STATUS 1
#define PHY_100BT4_CAPABLE 0x8000
#define PHY_100BTX_FD_CAPABLE 0x4000
#define PHY_100BTX_CAPABLE 0x2000
#define PHY_10BT_FD_CAPABLE 0x1000
#define PHY_10BT_CAPABLE 0x0800
#define PHY_MII_SUPPRESS_CAPABLE 0x0040
#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
#define PHY_REMOTE_FAULT 0x0010
#define PHY_AUTO_NEG_CAPABLE 0x0008
#define PHY_LINK_STATUS 0x0004
#define PHY_JABBER_DETECT 0x0002
#define PHY_EXTENDED_CAPABILITY 0x0001
#define KS884X_PHY_ID_1_OFFSET 0x04
#define KS884X_PHY_ID_2_OFFSET 0x06
/* PHY Identifier Registers */
#define PHY_REG_ID_1 2
#define PHY_REG_ID_2 3
#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
/* Auto-Negotiation Advertisement Register */
#define PHY_REG_AUTO_NEGOTIATION 4
#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
/* Not supported. */
#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
#define PHY_AUTO_NEG_100BT4 0x0200
#define PHY_AUTO_NEG_100BTX_FD 0x0100
#define PHY_AUTO_NEG_100BTX 0x0080
#define PHY_AUTO_NEG_10BT_FD 0x0040
#define PHY_AUTO_NEG_10BT 0x0020
#define PHY_AUTO_NEG_SELECTOR 0x001F
#define PHY_AUTO_NEG_802_3 0x0001
#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
/* Auto-Negotiation Link Partner Ability Register */
#define PHY_REG_REMOTE_CAPABILITY 5
#define PHY_REMOTE_NEXT_PAGE 0x8000
#define PHY_REMOTE_ACKNOWLEDGE 0x4000
#define PHY_REMOTE_REMOTE_FAULT 0x2000
#define PHY_REMOTE_SYM_PAUSE 0x0400
#define PHY_REMOTE_100BTX_FD 0x0100
#define PHY_REMOTE_100BTX 0x0080
#define PHY_REMOTE_10BT_FD 0x0040
#define PHY_REMOTE_10BT 0x0020
/* P1VCT */
#define KS884X_P1VCT_P 0x04F0
#define KS884X_P1PHYCTRL_P 0x04F2
/* P2VCT */
#define KS884X_P2VCT_P 0x04F4
#define KS884X_P2PHYCTRL_P 0x04F6
#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
#define KS884X_PHY_LINK_MD_OFFSET 0x00
#define PHY_START_CABLE_DIAG 0x8000
#define PHY_CABLE_DIAG_RESULT 0x6000
#define PHY_CABLE_STAT_NORMAL 0x0000
#define PHY_CABLE_STAT_OPEN 0x2000
#define PHY_CABLE_STAT_SHORT 0x4000
#define PHY_CABLE_STAT_FAILED 0x6000
#define PHY_CABLE_10M_SHORT 0x1000
#define PHY_CABLE_FAULT_COUNTER 0x01FF
#define KS884X_PHY_PHY_CTRL_OFFSET 0x02
#define PHY_STAT_REVERSED_POLARITY 0x0020
#define PHY_STAT_MDIX 0x0010
#define PHY_FORCE_LINK 0x0008
#define PHY_POWER_SAVING_DISABLE 0x0004
#define PHY_REMOTE_LOOPBACK 0x0002
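
/*
 * Illustrative sketch, not part of the original driver: starting a cable
 * diagnostic through the LinkMD register described above and returning the
 * raw result.  The busy-poll loop, its bound and the assumption that the
 * start bit self-clears are all guesses; "io" and "port" (0-based) are
 * hypothetical names.  The caller compares (result & PHY_CABLE_DIAG_RESULT)
 * against the PHY_CABLE_STAT_* values and reads the distance count from
 * (result & PHY_CABLE_FAULT_COUNTER).
 */
static inline u16 ksz_example_cable_diag(void __iomem *io, int port)
{
        void __iomem *reg = io + KS884X_PHY_SPECIAL_OFFSET +
                            port * PHY_SPECIAL_INTERVAL +
                            KS884X_PHY_LINK_MD_OFFSET;
        int timeout = 100;
        u16 data;

        /* Request a new diagnostic run. */
        writew(PHY_START_CABLE_DIAG, reg);

        /* Wait for the start bit to clear (assumed completion signal). */
        do {
                data = readw(reg);
        } while ((data & PHY_START_CABLE_DIAG) && --timeout);

        return data;
}
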
/* SIDER */
#define KS884X_SIDER_P 0x0400
#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
#define REG_FAMILY_ID 0x88
#define REG_CHIP_ID_41 0x8810
#define REG_CHIP_ID_42 0x8800
#define KS884X_CHIP_ID_MASK_41 0xFF10
#define KS884X_CHIP_ID_MASK 0xFFF0
#define KS884X_CHIP_ID_SHIFT 4
#define KS884X_REVISION_MASK 0x000E
#define KS884X_REVISION_SHIFT 1
#define KS8842_START 0x0001
#define CHIP_IP_41_M 0x8810
#define CHIP_IP_42_M 0x8800
#define CHIP_IP_61_M 0x8890
#define CHIP_IP_62_M 0x8880
#define CHIP_IP_41_P 0x8850
#define CHIP_IP_42_P 0x8840
#define CHIP_IP_61_P 0x88D0
#define CHIP_IP_62_P 0x88C0
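
/*
 * Illustrative sketch, not part of the original driver: telling the KS8841
 * and KS8842 apart from the SIDER register defined above.  Reading the ID
 * as a single 16-bit access at KS884X_CHIP_ID_OFFSET is an assumption;
 * "io" is an assumed register base.
 */
static inline int ksz_example_is_8841(void __iomem *io)
{
        u16 id = readw(io + KS884X_CHIP_ID_OFFSET);

        /* The two parts differ only in the switch-related ID bits. */
        return (id & KS884X_CHIP_ID_MASK_41) == REG_CHIP_ID_41;
}
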
/* SGCR1 */
#define KS8842_SGCR1_P 0x0402
#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
#define SWITCH_PASS_ALL 0x8000
#define SWITCH_TX_FLOW_CTRL 0x2000
#define SWITCH_RX_FLOW_CTRL 0x1000
#define SWITCH_CHECK_LENGTH 0x0800
#define SWITCH_AGING_ENABLE 0x0400
#define SWITCH_FAST_AGING 0x0200
#define SWITCH_AGGR_BACKOFF 0x0100
#define SWITCH_PASS_PAUSE 0x0008
#define SWITCH_LINK_AUTO_AGING 0x0001
/* SGCR2 */
#define KS8842_SGCR2_P 0x0404
#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
#define SWITCH_VLAN_ENABLE 0x8000
#define SWITCH_IGMP_SNOOP 0x4000
#define IPV6_MLD_SNOOP_ENABLE 0x2000
#define IPV6_MLD_SNOOP_OPTION 0x1000
#define PRIORITY_SCHEME_SELECT 0x0800
#define SWITCH_MIRROR_RX_TX 0x0100
#define UNICAST_VLAN_BOUNDARY 0x0080
#define MULTICAST_STORM_DISABLE 0x0040
#define SWITCH_BACK_PRESSURE 0x0020
#define FAIR_FLOW_CTRL 0x0010
#define NO_EXC_COLLISION_DROP 0x0008
#define SWITCH_HUGE_PACKET 0x0004
#define SWITCH_LEGAL_PACKET 0x0002
#define SWITCH_BUF_RESERVE 0x0001
/* SGCR3 */
#define KS8842_SGCR3_P 0x0406
#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
#define BROADCAST_STORM_RATE_LO 0xFF00
#define SWITCH_REPEATER 0x0080
#define SWITCH_HALF_DUPLEX 0x0040
#define SWITCH_FLOW_CTRL 0x0020
#define SWITCH_10_MBIT 0x0010
#define SWITCH_REPLACE_NULL_VID 0x0008
#define BROADCAST_STORM_RATE_HI 0x0007
#define BROADCAST_STORM_RATE 0x07FF
/* SGCR4 */
#define KS8842_SGCR4_P 0x0408
/* SGCR5 */
#define KS8842_SGCR5_P 0x040A
#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
#define LED_MODE 0x8200
#define LED_SPEED_DUPLEX_ACT 0x0000
#define LED_SPEED_DUPLEX_LINK_ACT 0x8000
#define LED_DUPLEX_10_100 0x0200
/* SGCR6 */
#define KS8842_SGCR6_P 0x0410
#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
#define KS8842_PRIORITY_MASK 3
#define KS8842_PRIORITY_SHIFT 2
/* SGCR7 */
#define KS8842_SGCR7_P 0x0412
#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
#define SWITCH_UNK_DEF_PORT_3 0x0004
#define SWITCH_UNK_DEF_PORT_2 0x0002
#define SWITCH_UNK_DEF_PORT_1 0x0001
/* MACAR1 */
#define KS8842_MACAR1_P 0x0470
#define KS8842_MACAR2_P 0x0472
#define KS8842_MACAR3_P 0x0474
#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
/* TOSR1 */
#define KS8842_TOSR1_P 0x0480
#define KS8842_TOSR2_P 0x0482
#define KS8842_TOSR3_P 0x0484
#define KS8842_TOSR4_P 0x0486
#define KS8842_TOSR5_P 0x0488
#define KS8842_TOSR6_P 0x048A
#define KS8842_TOSR7_P 0x0490
#define KS8842_TOSR8_P 0x0492
#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
/* P1CR1 */
#define KS8842_P1CR1_P 0x0500
#define KS8842_P1CR2_P 0x0502
#define KS8842_P1VIDR_P 0x0504
#define KS8842_P1CR3_P 0x0506
#define KS8842_P1IRCR_P 0x0508
#define KS8842_P1ERCR_P 0x050A
#define KS884X_P1SCSLMD_P 0x0510
#define KS884X_P1CR4_P 0x0512
#define KS884X_P1SR_P 0x0514
/* P2CR1 */
#define KS8842_P2CR1_P 0x0520
#define KS8842_P2CR2_P 0x0522
#define KS8842_P2VIDR_P 0x0524
#define KS8842_P2CR3_P 0x0526
#define KS8842_P2IRCR_P 0x0528
#define KS8842_P2ERCR_P 0x052A
#define KS884X_P2SCSLMD_P 0x0530
#define KS884X_P2CR4_P 0x0532
#define KS884X_P2SR_P 0x0534
/* P3CR1 */
#define KS8842_P3CR1_P 0x0540
#define KS8842_P3CR2_P 0x0542
#define KS8842_P3VIDR_P 0x0544
#define KS8842_P3CR3_P 0x0546
#define KS8842_P3IRCR_P 0x0548
#define KS8842_P3ERCR_P 0x054A
#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
#define PORT_CTRL_ADDR(port, addr) \
        (addr = KS8842_PORT_1_CTRL_1 + (port) * \
                (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
#define KS8842_PORT_CTRL_1_OFFSET 0x00
#define PORT_BROADCAST_STORM 0x0080
#define PORT_DIFFSERV_ENABLE 0x0040
#define PORT_802_1P_ENABLE 0x0020
#define PORT_BASED_PRIORITY_MASK 0x0018
#define PORT_BASED_PRIORITY_BASE 0x0003
#define PORT_BASED_PRIORITY_SHIFT 3
#define PORT_BASED_PRIORITY_0 0x0000
#define PORT_BASED_PRIORITY_1 0x0008
#define PORT_BASED_PRIORITY_2 0x0010
#define PORT_BASED_PRIORITY_3 0x0018
#define PORT_INSERT_TAG 0x0004
#define PORT_REMOVE_TAG 0x0002
#define PORT_PRIO_QUEUE_ENABLE 0x0001
#define KS8842_PORT_CTRL_2_OFFSET 0x02
#define PORT_INGRESS_VLAN_FILTER 0x4000
#define PORT_DISCARD_NON_VID 0x2000
#define PORT_FORCE_FLOW_CTRL 0x1000
#define PORT_BACK_PRESSURE 0x0800
#define PORT_TX_ENABLE 0x0400
#define PORT_RX_ENABLE 0x0200
#define PORT_LEARN_DISABLE 0x0100
#define PORT_MIRROR_SNIFFER 0x0080
#define PORT_MIRROR_RX 0x0040
#define PORT_MIRROR_TX 0x0020
#define PORT_USER_PRIORITY_CEILING 0x0008
#define PORT_VLAN_MEMBERSHIP 0x0007
#define KS8842_PORT_CTRL_VID_OFFSET 0x04
#define PORT_DEFAULT_VID 0x0001
#define KS8842_PORT_CTRL_3_OFFSET 0x06
#define PORT_INGRESS_LIMIT_MODE 0x000C
#define PORT_INGRESS_ALL 0x0000
#define PORT_INGRESS_UNICAST 0x0004
#define PORT_INGRESS_MULTICAST 0x0008
#define PORT_INGRESS_BROADCAST 0x000C
#define PORT_COUNT_IFG 0x0002
#define PORT_COUNT_PREAMBLE 0x0001
#define KS8842_PORT_IN_RATE_OFFSET 0x08
#define KS8842_PORT_OUT_RATE_OFFSET 0x0A
#define PORT_PRIORITY_RATE 0x0F
#define PORT_PRIORITY_RATE_SHIFT 4
#define KS884X_PORT_LINK_MD 0x10
#define PORT_CABLE_10M_SHORT 0x8000
#define PORT_CABLE_DIAG_RESULT 0x6000
#define PORT_CABLE_STAT_NORMAL 0x0000
#define PORT_CABLE_STAT_OPEN 0x2000
#define PORT_CABLE_STAT_SHORT 0x4000
#define PORT_CABLE_STAT_FAILED 0x6000
#define PORT_START_CABLE_DIAG 0x1000
#define PORT_FORCE_LINK 0x0800
#define PORT_POWER_SAVING_DISABLE 0x0400
#define PORT_PHY_REMOTE_LOOPBACK 0x0200
#define PORT_CABLE_FAULT_COUNTER 0x01FF
#define KS884X_PORT_CTRL_4_OFFSET 0x12
#define PORT_LED_OFF 0x8000
#define PORT_TX_DISABLE 0x4000
#define PORT_AUTO_NEG_RESTART 0x2000
#define PORT_REMOTE_FAULT_DISABLE 0x1000
#define PORT_POWER_DOWN 0x0800
#define PORT_AUTO_MDIX_DISABLE 0x0400
#define PORT_FORCE_MDIX 0x0200
#define PORT_LOOPBACK 0x0100
#define PORT_AUTO_NEG_ENABLE 0x0080
#define PORT_FORCE_100_MBIT 0x0040
#define PORT_FORCE_FULL_DUPLEX 0x0020
#define PORT_AUTO_NEG_SYM_PAUSE 0x0010
#define PORT_AUTO_NEG_100BTX_FD 0x0008
#define PORT_AUTO_NEG_100BTX 0x0004
#define PORT_AUTO_NEG_10BT_FD 0x0002
#define PORT_AUTO_NEG_10BT 0x0001
#define KS884X_PORT_STATUS_OFFSET 0x14
#define PORT_HP_MDIX 0x8000
#define PORT_REVERSED_POLARITY 0x2000
#define PORT_RX_FLOW_CTRL 0x0800
#define PORT_TX_FLOW_CTRL 0x1000
#define PORT_STATUS_SPEED_100MBIT 0x0400
#define PORT_STATUS_FULL_DUPLEX 0x0200
#define PORT_REMOTE_FAULT 0x0100
#define PORT_MDIX_STATUS 0x0080
#define PORT_AUTO_NEG_COMPLETE 0x0040
#define PORT_STATUS_LINK_GOOD 0x0020
#define PORT_REMOTE_SYM_PAUSE 0x0010
#define PORT_REMOTE_100BTX_FD 0x0008
#define PORT_REMOTE_100BTX 0x0004
#define PORT_REMOTE_10BT_FD 0x0002
#define PORT_REMOTE_10BT 0x0001
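
/*
 * Illustrative sketch, not part of the original driver: the switch ports
 * are laid out as identical register blocks, so PORT_CTRL_ADDR() plus one
 * of the per-port offsets above reaches any port register.  This helper
 * reports link state from the port status register; "io" and "port"
 * (0-based) are hypothetical names.
 */
static inline int ksz_example_port_link_up(void __iomem *io, int port)
{
        u32 addr;

        PORT_CTRL_ADDR(port, addr);
        addr += KS884X_PORT_STATUS_OFFSET;

        return !!(readw(io + addr) & PORT_STATUS_LINK_GOOD);
}
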
/*
#define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
#define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
#define STATIC_MAC_TABLE_VALID 00-00080000-00000000
#define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
#define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
#define STATIC_MAC_TABLE_FID 00-03C00000-00000000
*/
#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
#define STATIC_MAC_TABLE_VALID 0x00080000
#define STATIC_MAC_TABLE_OVERRIDE 0x00100000
#define STATIC_MAC_TABLE_USE_FID 0x00200000
#define STATIC_MAC_TABLE_FID 0x03C00000
#define STATIC_MAC_FWD_PORTS_SHIFT 16
#define STATIC_MAC_FID_SHIFT 22
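
/*
 * Illustrative sketch, not part of the original driver: packing a static
 * MAC table entry into the two 32-bit halves implied by the layout above
 * (the 48-bit address fills the low word plus the bottom 16 bits of the
 * high word).  The byte ordering and the parameter names are assumptions.
 */
static inline void ksz_example_static_mac(const u8 *addr, u8 ports,
                                          u32 *data_hi, u32 *data_lo)
{
        *data_lo = ((u32)addr[2] << 24) | ((u32)addr[3] << 16) |
                   ((u32)addr[4] << 8) | addr[5];
        *data_hi = ((u32)addr[0] << 8) | addr[1];

        /* Forwarding port map, then mark the entry valid. */
        *data_hi |= ((u32)ports << STATIC_MAC_FWD_PORTS_SHIFT) &
                    STATIC_MAC_TABLE_FWD_PORTS;
        *data_hi |= STATIC_MAC_TABLE_VALID;
}
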
/*
#define VLAN_TABLE_VID 00-00000000-00000FFF
#define VLAN_TABLE_FID 00-00000000-0000F000
#define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
#define VLAN_TABLE_VALID 00-00000000-00080000
*/
#define VLAN_TABLE_VID 0x00000FFF
#define VLAN_TABLE_FID 0x0000F000
#define VLAN_TABLE_MEMBERSHIP 0x00070000
#define VLAN_TABLE_VALID 0x00080000
#define VLAN_TABLE_FID_SHIFT 12
#define VLAN_TABLE_MEMBERSHIP_SHIFT 16
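
/*
 * Illustrative sketch, not part of the original driver: packing one VLAN
 * table entry from the fields above.  Parameter names are hypothetical.
 */
static inline u32 ksz_example_vlan_entry(u16 vid, u8 fid, u8 member)
{
        return (vid & VLAN_TABLE_VID) |
               (((u32)fid << VLAN_TABLE_FID_SHIFT) & VLAN_TABLE_FID) |
               (((u32)member << VLAN_TABLE_MEMBERSHIP_SHIFT) &
                VLAN_TABLE_MEMBERSHIP) |
               VLAN_TABLE_VALID;
}
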
/*
#define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
#define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
#define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
#define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
#define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
#define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
#define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
#define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
*/
#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
#define DYNAMIC_MAC_TABLE_FID 0x000F0000
#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
#define DYNAMIC_MAC_TABLE_RESERVED 0x78
#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
#define DYNAMIC_MAC_FID_SHIFT 16
#define DYNAMIC_MAC_SRC_PORT_SHIFT 20
#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
#define DYNAMIC_MAC_ENTRIES_SHIFT 24
#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
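
/*
 * Illustrative sketch, not part of the original driver: decoding the
 * per-entry fields of a dynamic MAC table word with the masks above.
 * Splitting the upper half of the 64-bit entry into one u32 plus a byte of
 * flags mirrors how the #define values are split, but the actual read
 * sequence is an assumption; all names are hypothetical.
 */
static inline int ksz_example_dyn_mac_fields(u32 data_hi, u8 flags,
                                             u8 *fid, u8 *src_port)
{
        /* Nothing to report while the table is busy or empty. */
        if ((flags & DYNAMIC_MAC_TABLE_NOT_READY) ||
            (flags & DYNAMIC_MAC_TABLE_MAC_EMPTY))
                return 0;

        *fid = (data_hi & DYNAMIC_MAC_TABLE_FID) >> DYNAMIC_MAC_FID_SHIFT;
        *src_port = (data_hi & DYNAMIC_MAC_TABLE_SRC_PORT) >>
                    DYNAMIC_MAC_SRC_PORT_SHIFT;
        return 1;
}
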
/*
#define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
#define MIB_COUNTER_VALID 00-00000000-40000000
#define MIB_COUNTER_OVERFLOW 00-00000000-80000000
*/
#define MIB_COUNTER_VALUE 0x3FFFFFFF
#define MIB_COUNTER_VALID 0x40000000
#define MIB_COUNTER_OVERFLOW 0x80000000
#define MIB_PACKET_DROPPED 0x0000FFFF
#define KS_MIB_PACKET_DROPPED_TX_0 0x100
#define KS_MIB_PACKET_DROPPED_TX_1 0x101
#define KS_MIB_PACKET_DROPPED_TX 0x102
#define KS_MIB_PACKET_DROPPED_RX_0 0x103
#define KS_MIB_PACKET_DROPPED_RX_1 0x104
#define KS_MIB_PACKET_DROPPED_RX 0x105
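
/*
 * Illustrative sketch, not part of the original driver: folding one raw
 * 32-bit MIB counter read into a running 64-bit total using the flag bits
 * above.  How the raw value is fetched (via the indirect access registers)
 * is outside this sketch; "raw" and "total" are hypothetical names.
 */
static inline void ksz_example_mib_update(u32 raw, u64 *total)
{
        if (!(raw & MIB_COUNTER_VALID))
                return;

        /* On overflow the hardware counter wrapped once past its range. */
        if (raw & MIB_COUNTER_OVERFLOW)
                *total += (u64)MIB_COUNTER_VALUE + 1;
        *total += raw & MIB_COUNTER_VALUE;
}
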
/* Change default LED mode. */
#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
#define MAX_ETHERNET_BODY_SIZE 1500
#define ETHERNET_HEADER_SIZE (14 + VLAN_HLEN)
#define MAX_ETHERNET_PACKET_SIZE \
        (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
#define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
#define MAX_RX_BUF_SIZE (1912 + 4)
#define ADDITIONAL_ENTRIES 16
#define MAX_MULTICAST_LIST 32
#define HW_MULTICAST_SIZE 8
#define HW_TO_DEV_PORT(port) (port - 1)
enum {
        media_connected,
        media_disconnected
};
enum {
        OID_COUNTER_UNKOWN,
        OID_COUNTER_FIRST,
        /* total transmit errors */
        OID_COUNTER_XMIT_ERROR,
        /* total receive errors */
        OID_COUNTER_RCV_ERROR,
        OID_COUNTER_LAST
};
/*
 * Hardware descriptor definitions
 */
#define DESC_ALIGNMENT 16
#define BUFFER_ALIGNMENT 8
#define NUM_OF_RX_DESC 64
#define NUM_OF_TX_DESC 64
#define KS_DESC_RX_FRAME_LEN 0x000007FF
#define KS_DESC_RX_FRAME_TYPE 0x00008000
#define KS_DESC_RX_ERROR_CRC 0x00010000
#define KS_DESC_RX_ERROR_RUNT 0x00020000
#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
#define KS_DESC_RX_ERROR_PHY 0x00080000
#define KS884X_DESC_RX_PORT_MASK 0x00300000
#define KS_DESC_RX_MULTICAST 0x01000000
#define KS_DESC_RX_ERROR 0x02000000
#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
#define KS_DESC_RX_LAST 0x20000000
#define KS_DESC_RX_FIRST 0x40000000
#define KS_DESC_RX_ERROR_COND \
        (KS_DESC_RX_ERROR_CRC | \
        KS_DESC_RX_ERROR_RUNT | \
        KS_DESC_RX_ERROR_PHY | \
        KS_DESC_RX_ERROR_TOO_LONG)
#define KS_DESC_HW_OWNED 0x80000000
#define KS_DESC_BUF_SIZE 0x000007FF
#define KS884X_DESC_TX_PORT_MASK 0x00300000
#define KS_DESC_END_OF_RING 0x02000000
#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
#define KS_DESC_TX_CSUM_GEN_IP 0x10000000
#define KS_DESC_TX_LAST 0x20000000
#define KS_DESC_TX_FIRST 0x40000000
#define KS_DESC_TX_INTERRUPT 0x80000000
#define KS_DESC_PORT_SHIFT 20
#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
#define KS_DESC_TX_MASK \
        (KS_DESC_TX_INTERRUPT | \
        KS_DESC_TX_FIRST | \
        KS_DESC_TX_LAST | \
        KS_DESC_TX_CSUM_GEN_IP | \
        KS_DESC_TX_CSUM_GEN_TCP | \
        KS_DESC_TX_CSUM_GEN_UDP | \
        KS_DESC_BUF_SIZE)
struct ksz_desc_rx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
        u32 hw_owned:1;
        u32 first_desc:1;
        u32 last_desc:1;
        u32 csum_err_ip:1;
        u32 csum_err_tcp:1;
        u32 csum_err_udp:1;
        u32 error:1;
        u32 multicast:1;
        u32 src_port:4;
        u32 err_phy:1;
        u32 err_too_long:1;
        u32 err_runt:1;
        u32 err_crc:1;
        u32 frame_type:1;
        u32 reserved1:4;
        u32 frame_len:11;
#else
        u32 frame_len:11;
        u32 reserved1:4;
        u32 frame_type:1;
        u32 err_crc:1;
        u32 err_runt:1;
        u32 err_too_long:1;
        u32 err_phy:1;
        u32 src_port:4;
        u32 multicast:1;
        u32 error:1;
        u32 csum_err_udp:1;
        u32 csum_err_tcp:1;
        u32 csum_err_ip:1;
        u32 last_desc:1;
        u32 first_desc:1;
        u32 hw_owned:1;
#endif
};
struct ksz_desc_tx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
        u32 hw_owned:1;
        u32 reserved1:31;
#else
        u32 reserved1:31;
        u32 hw_owned:1;
#endif
};
struct ksz_desc_rx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
        u32 reserved4:6;
        u32 end_of_ring:1;
        u32 reserved3:14;
        u32 buf_size:11;
#else
        u32 buf_size:11;
        u32 reserved3:14;
        u32 end_of_ring:1;
        u32 reserved4:6;
#endif
};
struct ksz_desc_tx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
        u32 intr:1;
        u32 first_seg:1;
        u32 last_seg:1;
        u32 csum_gen_ip:1;
        u32 csum_gen_tcp:1;
        u32 csum_gen_udp:1;
        u32 end_of_ring:1;
        u32 reserved4:1;
        u32 dest_port:4;
        u32 reserved3:9;
        u32 buf_size:11;
#else
        u32 buf_size:11;
        u32 reserved3:9;
        u32 dest_port:4;
        u32 reserved4:1;
        u32 end_of_ring:1;
        u32 csum_gen_udp:1;
        u32 csum_gen_tcp:1;
        u32 csum_gen_ip:1;
        u32 last_seg:1;
        u32 first_seg:1;
        u32 intr:1;
#endif
};
union desc_stat {
        struct ksz_desc_rx_stat rx;
        struct ksz_desc_tx_stat tx;
        u32 data;
};
union desc_buf {
        struct ksz_desc_rx_buf rx;
        struct ksz_desc_tx_buf tx;
        u32 data;
};
/**
 * struct ksz_hw_desc - Hardware descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @addr: Physical address of memory buffer.
 * @next: Pointer to next hardware descriptor.
 */
struct ksz_hw_desc {
        union desc_stat ctrl;
        union desc_buf buf;
        u32 addr;
        u32 next;
};
  806. /**
  807. * struct ksz_sw_desc - Software descriptor data structure
  808. * @ctrl: Descriptor control value.
  809. * @buf: Descriptor buffer value.
 * @buf_size: Current buffer size value in the hardware descriptor.
  811. */
  812. struct ksz_sw_desc {
  813. union desc_stat ctrl;
  814. union desc_buf buf;
  815. u32 buf_size;
  816. };
  817. /**
  818. * struct ksz_dma_buf - OS dependent DMA buffer data structure
  819. * @skb: Associated socket buffer.
  820. * @dma: Associated physical DMA address.
 * @len: Actual length used.
  822. */
  823. struct ksz_dma_buf {
  824. struct sk_buff *skb;
  825. dma_addr_t dma;
  826. int len;
  827. };
  828. /**
  829. * struct ksz_desc - Descriptor structure
  830. * @phw: Hardware descriptor pointer to uncached physical memory.
  831. * @sw: Cached memory to hold hardware descriptor values for
  832. * manipulation.
  833. * @dma_buf: Operating system dependent data structure to hold physical
  834. * memory buffer allocation information.
  835. */
  836. struct ksz_desc {
  837. struct ksz_hw_desc *phw;
  838. struct ksz_sw_desc sw;
  839. struct ksz_dma_buf dma_buf;
  840. };
  841. #define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
  842. /**
  843. * struct ksz_desc_info - Descriptor information data structure
  844. * @ring: First descriptor in the ring.
  845. * @cur: Current descriptor being manipulated.
  846. * @ring_virt: First hardware descriptor in the ring.
  847. * @ring_phys: The physical address of the first descriptor of the ring.
  848. * @size: Size of hardware descriptor.
  849. * @alloc: Number of descriptors allocated.
  850. * @avail: Number of descriptors available for use.
  851. * @last: Index for last descriptor released to hardware.
  852. * @next: Index for next descriptor available for use.
  853. * @mask: Mask for index wrapping.
  854. */
  855. struct ksz_desc_info {
  856. struct ksz_desc *ring;
  857. struct ksz_desc *cur;
  858. struct ksz_hw_desc *ring_virt;
  859. u32 ring_phys;
  860. int size;
  861. int alloc;
  862. int avail;
  863. int last;
  864. int next;
  865. int mask;
  866. };
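/*
 * Note: @last and @next wrap around with "index &= mask" (see get_rx_pkt()
 * and get_tx_pkt() below), so @mask is expected to be the ring size minus
 * one; the ring sizes used here (NUM_OF_RX_DESC/NUM_OF_TX_DESC) are powers
 * of two.
 */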
  867. /*
  868. * KSZ8842 switch definitions
  869. */
  870. enum {
  871. TABLE_STATIC_MAC = 0,
  872. TABLE_VLAN,
  873. TABLE_DYNAMIC_MAC,
  874. TABLE_MIB
  875. };
  876. #define LEARNED_MAC_TABLE_ENTRIES 1024
  877. #define STATIC_MAC_TABLE_ENTRIES 8
  878. /**
  879. * struct ksz_mac_table - Static MAC table data structure
  880. * @mac_addr: MAC address to filter.
  881. * @vid: VID value.
  882. * @fid: FID value.
  883. * @ports: Port membership.
  884. * @override: Override setting.
  885. * @use_fid: FID use setting.
  886. * @valid: Valid setting indicating the entry is being used.
  887. */
  888. struct ksz_mac_table {
  889. u8 mac_addr[ETH_ALEN];
  890. u16 vid;
  891. u8 fid;
  892. u8 ports;
  893. u8 override:1;
  894. u8 use_fid:1;
  895. u8 valid:1;
  896. };
  897. #define VLAN_TABLE_ENTRIES 16
  898. /**
  899. * struct ksz_vlan_table - VLAN table data structure
  900. * @vid: VID value.
  901. * @fid: FID value.
  902. * @member: Port membership.
  903. */
  904. struct ksz_vlan_table {
  905. u16 vid;
  906. u8 fid;
  907. u8 member;
  908. };
  909. #define DIFFSERV_ENTRIES 64
  910. #define PRIO_802_1P_ENTRIES 8
  911. #define PRIO_QUEUES 4
  912. #define SWITCH_PORT_NUM 2
  913. #define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
  914. #define HOST_MASK (1 << SWITCH_PORT_NUM)
  915. #define PORT_MASK 7
  916. #define MAIN_PORT 0
  917. #define OTHER_PORT 1
  918. #define HOST_PORT SWITCH_PORT_NUM
  919. #define PORT_COUNTER_NUM 0x20
  920. #define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
  921. #define MIB_COUNTER_RX_LO_PRIORITY 0x00
  922. #define MIB_COUNTER_RX_HI_PRIORITY 0x01
  923. #define MIB_COUNTER_RX_UNDERSIZE 0x02
  924. #define MIB_COUNTER_RX_FRAGMENT 0x03
  925. #define MIB_COUNTER_RX_OVERSIZE 0x04
  926. #define MIB_COUNTER_RX_JABBER 0x05
  927. #define MIB_COUNTER_RX_SYMBOL_ERR 0x06
  928. #define MIB_COUNTER_RX_CRC_ERR 0x07
  929. #define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
  930. #define MIB_COUNTER_RX_CTRL_8808 0x09
  931. #define MIB_COUNTER_RX_PAUSE 0x0A
  932. #define MIB_COUNTER_RX_BROADCAST 0x0B
  933. #define MIB_COUNTER_RX_MULTICAST 0x0C
  934. #define MIB_COUNTER_RX_UNICAST 0x0D
  935. #define MIB_COUNTER_RX_OCTET_64 0x0E
  936. #define MIB_COUNTER_RX_OCTET_65_127 0x0F
  937. #define MIB_COUNTER_RX_OCTET_128_255 0x10
  938. #define MIB_COUNTER_RX_OCTET_256_511 0x11
  939. #define MIB_COUNTER_RX_OCTET_512_1023 0x12
  940. #define MIB_COUNTER_RX_OCTET_1024_1522 0x13
  941. #define MIB_COUNTER_TX_LO_PRIORITY 0x14
  942. #define MIB_COUNTER_TX_HI_PRIORITY 0x15
  943. #define MIB_COUNTER_TX_LATE_COLLISION 0x16
  944. #define MIB_COUNTER_TX_PAUSE 0x17
  945. #define MIB_COUNTER_TX_BROADCAST 0x18
  946. #define MIB_COUNTER_TX_MULTICAST 0x19
  947. #define MIB_COUNTER_TX_UNICAST 0x1A
  948. #define MIB_COUNTER_TX_DEFERRED 0x1B
  949. #define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
  950. #define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
  951. #define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
  952. #define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
  953. #define MIB_COUNTER_RX_DROPPED_PACKET 0x20
  954. #define MIB_COUNTER_TX_DROPPED_PACKET 0x21
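/*
 * The two counters beyond PORT_COUNTER_NUM (the RX/TX dropped packet counts)
 * are not read through the regular MIB counter interface; they are fetched by
 * port_r_mib_pkt() and stored at the end of the per-port counter array, which
 * is why that array holds TOTAL_PORT_COUNTER_NUM entries.
 */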
  955. /**
  956. * struct ksz_port_mib - Port MIB data structure
  957. * @cnt_ptr: Current pointer to MIB counter index.
  958. * @link_down: Indication the link has just gone down.
  959. * @state: Connection status of the port.
  960. * @mib_start: The starting counter index. Some ports do not start at 0.
  961. * @counter: 64-bit MIB counter value.
 * @dropped: Temporary buffer to remember the last read dropped packet values.
 *
 * MIB counters need to be read periodically so that the counters do not
 * overflow and give incorrect values. A balance is needed to satisfy this
 * condition without wasting too much CPU time.
  967. *
  968. * It is pointless to read MIB counters when the port is disconnected. The
  969. * @state provides the connection status so that MIB counters are read only
  970. * when the port is connected. The @link_down indicates the port is just
  971. * disconnected so that all MIB counters are read one last time to update the
  972. * information.
  973. */
  974. struct ksz_port_mib {
  975. u8 cnt_ptr;
  976. u8 link_down;
  977. u8 state;
  978. u8 mib_start;
  979. u64 counter[TOTAL_PORT_COUNTER_NUM];
  980. u32 dropped[2];
  981. };
  982. /**
  983. * struct ksz_port_cfg - Port configuration data structure
  984. * @vid: VID value.
  985. * @member: Port membership.
  986. * @port_prio: Port priority.
  987. * @rx_rate: Receive priority rate.
  988. * @tx_rate: Transmit priority rate.
  989. * @stp_state: Current Spanning Tree Protocol state.
  990. */
  991. struct ksz_port_cfg {
  992. u16 vid;
  993. u8 member;
  994. u8 port_prio;
  995. u32 rx_rate[PRIO_QUEUES];
  996. u32 tx_rate[PRIO_QUEUES];
  997. int stp_state;
  998. };
  999. /**
  1000. * struct ksz_switch - KSZ8842 switch data structure
  1001. * @mac_table: MAC table entries information.
  1002. * @vlan_table: VLAN table entries information.
  1003. * @port_cfg: Port configuration information.
 * @diffserv: DiffServ priority settings. Possible values come from the 6-bit
 * ToS (bit7 ~ bit2) field.
 * @p_802_1p: 802.1p priority settings. Possible values come from the 3-bit
 * 802.1p tag priority field.
  1008. * @br_addr: Bridge address. Used for STP.
  1009. * @other_addr: Other MAC address. Used for multiple network device mode.
  1010. * @broad_per: Broadcast storm percentage.
  1011. * @member: Current port membership. Used for STP.
  1012. */
  1013. struct ksz_switch {
  1014. struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
  1015. struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
  1016. struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];
  1017. u8 diffserv[DIFFSERV_ENTRIES];
  1018. u8 p_802_1p[PRIO_802_1P_ENTRIES];
  1019. u8 br_addr[ETH_ALEN];
  1020. u8 other_addr[ETH_ALEN];
  1021. u8 broad_per;
  1022. u8 member;
  1023. };
  1024. #define TX_RATE_UNIT 10000
  1025. /**
  1026. * struct ksz_port_info - Port information data structure
  1027. * @state: Connection status of the port.
  1028. * @tx_rate: Transmit rate divided by 10000 to get Mbit.
  1029. * @duplex: Duplex mode.
  1030. * @advertised: Advertised auto-negotiation setting. Used to determine link.
  1031. * @partner: Auto-negotiation partner setting. Used to determine link.
  1032. * @port_id: Port index to access actual hardware register.
  1033. * @pdev: Pointer to OS dependent network device.
  1034. */
  1035. struct ksz_port_info {
  1036. uint state;
  1037. uint tx_rate;
  1038. u8 duplex;
  1039. u8 advertised;
  1040. u8 partner;
  1041. u8 port_id;
  1042. void *pdev;
  1043. };
  1044. #define MAX_TX_HELD_SIZE 52000
  1045. /* Hardware features and bug fixes. */
  1046. #define LINK_INT_WORKING (1 << 0)
  1047. #define SMALL_PACKET_TX_BUG (1 << 1)
  1048. #define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
  1049. #define RX_HUGE_FRAME (1 << 4)
  1050. #define STP_SUPPORT (1 << 8)
  1051. /* Software overrides. */
  1052. #define PAUSE_FLOW_CTRL (1 << 0)
  1053. #define FAST_AGING (1 << 1)
  1054. /**
  1055. * struct ksz_hw - KSZ884X hardware data structure
  1056. * @io: Virtual address assigned.
  1057. * @ksz_switch: Pointer to KSZ8842 switch.
  1058. * @port_info: Port information.
  1059. * @port_mib: Port MIB information.
  1060. * @dev_count: Number of network devices this hardware supports.
  1061. * @dst_ports: Destination ports in switch for transmission.
  1062. * @id: Hardware ID. Used for display only.
  1063. * @mib_cnt: Number of MIB counters this hardware has.
  1064. * @mib_port_cnt: Number of ports with MIB counters.
  1065. * @tx_cfg: Cached transmit control settings.
  1066. * @rx_cfg: Cached receive control settings.
  1067. * @intr_mask: Current interrupt mask.
 * @intr_set: Current interrupt set.
  1069. * @intr_blocked: Interrupt blocked.
  1070. * @rx_desc_info: Receive descriptor information.
  1071. * @tx_desc_info: Transmit descriptor information.
  1072. * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
  1073. * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
  1074. * @tx_size: Transmit data size. Used for TX optimization.
  1075. * The maximum is defined by MAX_TX_HELD_SIZE.
  1076. * @perm_addr: Permanent MAC address.
  1077. * @override_addr: Overridden MAC address.
  1078. * @address: Additional MAC address entries.
  1079. * @addr_list_size: Additional MAC address list size.
  1080. * @mac_override: Indication of MAC address overridden.
  1081. * @promiscuous: Counter to keep track of promiscuous mode set.
  1082. * @all_multi: Counter to keep track of all multicast mode set.
  1083. * @multi_list: Multicast address entries.
  1084. * @multi_bits: Cached multicast hash table settings.
  1085. * @multi_list_size: Multicast address list size.
  1086. * @enabled: Indication of hardware enabled.
  1087. * @rx_stop: Indication of receive process stop.
  1088. * @features: Hardware features to enable.
  1089. * @overrides: Hardware features to override.
  1090. * @parent: Pointer to parent, network device private structure.
  1091. */
  1092. struct ksz_hw {
  1093. void __iomem *io;
  1094. struct ksz_switch *ksz_switch;
  1095. struct ksz_port_info port_info[SWITCH_PORT_NUM];
  1096. struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
  1097. int dev_count;
  1098. int dst_ports;
  1099. int id;
  1100. int mib_cnt;
  1101. int mib_port_cnt;
  1102. u32 tx_cfg;
  1103. u32 rx_cfg;
  1104. u32 intr_mask;
  1105. u32 intr_set;
  1106. uint intr_blocked;
  1107. struct ksz_desc_info rx_desc_info;
  1108. struct ksz_desc_info tx_desc_info;
  1109. int tx_int_cnt;
  1110. int tx_int_mask;
  1111. int tx_size;
  1112. u8 perm_addr[ETH_ALEN];
  1113. u8 override_addr[ETH_ALEN];
  1114. u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
  1115. u8 addr_list_size;
  1116. u8 mac_override;
  1117. u8 promiscuous;
  1118. u8 all_multi;
  1119. u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
  1120. u8 multi_bits[HW_MULTICAST_SIZE];
  1121. u8 multi_list_size;
  1122. u8 enabled;
  1123. u8 rx_stop;
  1124. u8 reserved2[1];
  1125. uint features;
  1126. uint overrides;
  1127. void *parent;
  1128. };
  1129. enum {
  1130. PHY_NO_FLOW_CTRL,
  1131. PHY_FLOW_CTRL,
  1132. PHY_TX_ONLY,
  1133. PHY_RX_ONLY
  1134. };
  1135. /**
  1136. * struct ksz_port - Virtual port data structure
  1137. * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
  1138. * duplex, and 0 for auto, which normally results in full
  1139. * duplex.
  1140. * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
  1141. * 0 for auto, which normally results in 100 Mbit.
  1142. * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
  1143. * force.
  1144. * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
  1145. * control, and PHY_FLOW_CTRL for flow control.
  1146. * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
  1147. * Mbit PHY.
  1148. * @first_port: Index of first port this port supports.
  1149. * @mib_port_cnt: Number of ports with MIB counters.
  1150. * @port_cnt: Number of ports this port supports.
  1151. * @counter: Port statistics counter.
  1152. * @hw: Pointer to hardware structure.
  1153. * @linked: Pointer to port information linked to this port.
  1154. */
  1155. struct ksz_port {
  1156. u8 duplex;
  1157. u8 speed;
  1158. u8 force_link;
  1159. u8 flow_ctrl;
  1160. int first_port;
  1161. int mib_port_cnt;
  1162. int port_cnt;
  1163. u64 counter[OID_COUNTER_LAST];
  1164. struct ksz_hw *hw;
  1165. struct ksz_port_info *linked;
  1166. };
  1167. /**
  1168. * struct ksz_timer_info - Timer information data structure
  1169. * @timer: Kernel timer.
  1170. * @cnt: Running timer counter.
  1171. * @max: Number of times to run timer; -1 for infinity.
  1172. * @period: Timer period in jiffies.
  1173. */
  1174. struct ksz_timer_info {
  1175. struct timer_list timer;
  1176. int cnt;
  1177. int max;
  1178. int period;
  1179. };
  1180. /**
  1181. * struct ksz_shared_mem - OS dependent shared memory data structure
  1182. * @dma_addr: Physical DMA address allocated.
  1183. * @alloc_size: Allocation size.
  1184. * @phys: Actual physical address used.
  1185. * @alloc_virt: Virtual address allocated.
  1186. * @virt: Actual virtual address used.
  1187. */
  1188. struct ksz_shared_mem {
  1189. dma_addr_t dma_addr;
  1190. uint alloc_size;
  1191. uint phys;
  1192. u8 *alloc_virt;
  1193. u8 *virt;
  1194. };
  1195. /**
  1196. * struct ksz_counter_info - OS dependent counter information data structure
  1197. * @counter: Wait queue to wakeup after counters are read.
  1198. * @time: Next time in jiffies to read counter.
  1199. * @read: Indication of counters read in full or not.
  1200. */
  1201. struct ksz_counter_info {
  1202. wait_queue_head_t counter;
  1203. unsigned long time;
  1204. int read;
  1205. };
  1206. /**
  1207. * struct dev_info - Network device information data structure
  1208. * @dev: Pointer to network device.
  1209. * @pdev: Pointer to PCI device.
  1210. * @hw: Hardware structure.
  1211. * @desc_pool: Physical memory used for descriptor pool.
 * @hwlock: Spinlock to serialize hardware register access.
 * @lock: Mutex to serialize access to the device.
  1214. * @dev_rcv: Receive process function used.
  1215. * @last_skb: Socket buffer allocated for descriptor rx fragments.
  1216. * @skb_index: Buffer index for receiving fragments.
  1217. * @skb_len: Buffer length for receiving fragments.
  1218. * @mib_read: Workqueue to read MIB counters.
  1219. * @mib_timer_info: Timer to read MIB counters.
  1220. * @counter: Used for MIB reading.
  1221. * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
  1222. * the maximum is MAX_RX_BUF_SIZE.
  1223. * @opened: Counter to keep track of device open.
  1224. * @rx_tasklet: Receive processing tasklet.
  1225. * @tx_tasklet: Transmit processing tasklet.
  1226. * @wol_enable: Wake-on-LAN enable set by ethtool.
  1227. * @wol_support: Wake-on-LAN support used by ethtool.
  1228. * @pme_wait: Used for KSZ8841 power management.
  1229. */
  1230. struct dev_info {
  1231. struct net_device *dev;
  1232. struct pci_dev *pdev;
  1233. struct ksz_hw hw;
  1234. struct ksz_shared_mem desc_pool;
  1235. spinlock_t hwlock;
  1236. struct mutex lock;
  1237. int (*dev_rcv)(struct dev_info *);
  1238. struct sk_buff *last_skb;
  1239. int skb_index;
  1240. int skb_len;
  1241. struct work_struct mib_read;
  1242. struct ksz_timer_info mib_timer_info;
  1243. struct ksz_counter_info counter[TOTAL_PORT_NUM];
  1244. int mtu;
  1245. int opened;
  1246. struct tasklet_struct rx_tasklet;
  1247. struct tasklet_struct tx_tasklet;
  1248. int wol_enable;
  1249. int wol_support;
  1250. unsigned long pme_wait;
  1251. };
  1252. /**
  1253. * struct dev_priv - Network device private data structure
  1254. * @adapter: Adapter device information.
  1255. * @port: Port information.
  1256. * @monitor_time_info: Timer to monitor ports.
  1257. * @proc_sem: Semaphore for proc accessing.
  1258. * @id: Device ID.
  1259. * @mii_if: MII interface information.
  1260. * @advertising: Temporary variable to store advertised settings.
  1261. * @msg_enable: The message flags controlling driver output.
  1262. * @media_state: The connection status of the device.
  1263. * @multicast: The all multicast state of the device.
  1264. * @promiscuous: The promiscuous state of the device.
  1265. */
  1266. struct dev_priv {
  1267. struct dev_info *adapter;
  1268. struct ksz_port port;
  1269. struct ksz_timer_info monitor_timer_info;
  1270. struct semaphore proc_sem;
  1271. int id;
  1272. struct mii_if_info mii_if;
  1273. u32 advertising;
  1274. u32 msg_enable;
  1275. int media_state;
  1276. int multicast;
  1277. int promiscuous;
  1278. };
  1279. #define DRV_NAME "KSZ884X PCI"
  1280. #define DEVICE_NAME "KSZ884x PCI"
  1281. #define DRV_VERSION "1.0.0"
  1282. #define DRV_RELDATE "Feb 8, 2010"
  1283. static char version[] =
  1284. "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";
  1285. static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
  1286. /*
  1287. * Interrupt processing primary routines
  1288. */
  1289. static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
  1290. {
  1291. writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
  1292. }
  1293. static inline void hw_dis_intr(struct ksz_hw *hw)
  1294. {
  1295. hw->intr_blocked = hw->intr_mask;
  1296. writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
  1297. hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
  1298. }
  1299. static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
  1300. {
  1301. hw->intr_set = interrupt;
  1302. writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
  1303. }
  1304. static inline void hw_ena_intr(struct ksz_hw *hw)
  1305. {
  1306. hw->intr_blocked = 0;
  1307. hw_set_intr(hw, hw->intr_mask);
  1308. }
  1309. static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
  1310. {
  1311. hw->intr_mask &= ~(bit);
  1312. }
  1313. static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
  1314. {
  1315. u32 read_intr;
  1316. read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
  1317. hw->intr_set = read_intr & ~interrupt;
  1318. writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
  1319. hw_dis_intr_bit(hw, interrupt);
  1320. }
  1321. /**
  1322. * hw_turn_on_intr - turn on specified interrupts
  1323. * @hw: The hardware instance.
 * @bit: The interrupt bits to turn on.
  1325. *
  1326. * This routine turns on the specified interrupts in the interrupt mask so that
  1327. * those interrupts will be enabled.
  1328. */
  1329. static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
  1330. {
  1331. hw->intr_mask |= bit;
  1332. if (!hw->intr_blocked)
  1333. hw_set_intr(hw, hw->intr_mask);
  1334. }
  1335. static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
  1336. {
  1337. u32 read_intr;
  1338. read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
  1339. hw->intr_set = read_intr | interrupt;
  1340. writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
  1341. }
  1342. static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
  1343. {
  1344. *status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
  1345. *status = *status & hw->intr_set;
  1346. }
  1347. static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
  1348. {
  1349. if (interrupt)
  1350. hw_ena_intr(hw);
  1351. }
  1352. /**
 * hw_block_intr - block hardware interrupts
 * @hw: The hardware instance.
 *
  1355. * This function blocks all interrupts of the hardware and returns the current
  1356. * interrupt enable mask so that interrupts can be restored later.
  1357. *
  1358. * Return the current interrupt enable mask.
  1359. */
  1360. static uint hw_block_intr(struct ksz_hw *hw)
  1361. {
  1362. uint interrupt = 0;
  1363. if (!hw->intr_blocked) {
  1364. hw_dis_intr(hw);
  1365. interrupt = hw->intr_blocked;
  1366. }
  1367. return interrupt;
  1368. }
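/*
 * Typical usage of the block/restore pair (as in sw_r_table() below):
 *
 *      uint interrupt;
 *
 *      interrupt = hw_block_intr(hw);
 *      ... indirect register access ...
 *      hw_restore_intr(hw, interrupt);
 *
 * Interrupts are re-enabled only if they were actually blocked by this call.
 */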
  1369. /*
  1370. * Hardware descriptor routines
  1371. */
  1372. static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
  1373. {
  1374. status.rx.hw_owned = 0;
  1375. desc->phw->ctrl.data = cpu_to_le32(status.data);
  1376. }
  1377. static inline void release_desc(struct ksz_desc *desc)
  1378. {
  1379. desc->sw.ctrl.tx.hw_owned = 1;
  1380. if (desc->sw.buf_size != desc->sw.buf.data) {
  1381. desc->sw.buf_size = desc->sw.buf.data;
  1382. desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
  1383. }
  1384. desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
  1385. }
  1386. static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
  1387. {
  1388. *desc = &info->ring[info->last];
  1389. info->last++;
  1390. info->last &= info->mask;
  1391. info->avail--;
  1392. (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
  1393. }
  1394. static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
  1395. {
  1396. desc->phw->addr = cpu_to_le32(addr);
  1397. }
  1398. static inline void set_rx_len(struct ksz_desc *desc, u32 len)
  1399. {
  1400. desc->sw.buf.rx.buf_size = len;
  1401. }
  1402. static inline void get_tx_pkt(struct ksz_desc_info *info,
  1403. struct ksz_desc **desc)
  1404. {
  1405. *desc = &info->ring[info->next];
  1406. info->next++;
  1407. info->next &= info->mask;
  1408. info->avail--;
  1409. (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
  1410. }
  1411. static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
  1412. {
  1413. desc->phw->addr = cpu_to_le32(addr);
  1414. }
  1415. static inline void set_tx_len(struct ksz_desc *desc, u32 len)
  1416. {
  1417. desc->sw.buf.tx.buf_size = len;
  1418. }
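/*
 * A transmit descriptor is typically prepared with the helpers above in this
 * order (sketch only; the actual transmit path may split across fragments):
 *
 *      get_tx_pkt(info, &desc);
 *      set_tx_buf(desc, dma_addr);
 *      set_tx_len(desc, len);
 *      release_desc(desc);
 *
 * release_desc() is what hands ownership of the descriptor to the hardware.
 */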
  1419. /* Switch functions */
  1420. #define TABLE_READ 0x10
  1421. #define TABLE_SEL_SHIFT 2
  1422. #define HW_DELAY(hw, reg) \
  1423. do { \
  1424. u16 dummy; \
  1425. dummy = readw(hw->io + reg); \
  1426. } while (0)
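/*
 * The dummy read in HW_DELAY() flushes the posted register write so the
 * indirect access request has reached the chip before its result is read
 * back.
 */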
  1427. /**
  1428. * sw_r_table - read 4 bytes of data from switch table
  1429. * @hw: The hardware instance.
  1430. * @table: The table selector.
  1431. * @addr: The address of the table entry.
  1432. * @data: Buffer to store the read data.
  1433. *
  1434. * This routine reads 4 bytes of data from the table of the switch.
  1435. * Hardware interrupts are disabled to minimize corruption of read data.
  1436. */
  1437. static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
  1438. {
  1439. u16 ctrl_addr;
  1440. uint interrupt;
  1441. ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;
  1442. interrupt = hw_block_intr(hw);
  1443. writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
  1444. HW_DELAY(hw, KS884X_IACR_OFFSET);
  1445. *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
  1446. hw_restore_intr(hw, interrupt);
  1447. }
  1448. /**
  1449. * sw_w_table_64 - write 8 bytes of data to the switch table
  1450. * @hw: The hardware instance.
  1451. * @table: The table selector.
  1452. * @addr: The address of the table entry.
  1453. * @data_hi: The high part of data to be written (bit63 ~ bit32).
  1454. * @data_lo: The low part of data to be written (bit31 ~ bit0).
  1455. *
  1456. * This routine writes 8 bytes of data to the table of the switch.
  1457. * Hardware interrupts are disabled to minimize corruption of written data.
  1458. */
  1459. static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
  1460. u32 data_lo)
  1461. {
  1462. u16 ctrl_addr;
  1463. uint interrupt;
  1464. ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;
  1465. interrupt = hw_block_intr(hw);
  1466. writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
  1467. writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);
  1468. writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
  1469. HW_DELAY(hw, KS884X_IACR_OFFSET);
  1470. hw_restore_intr(hw, interrupt);
  1471. }
  1472. /**
  1473. * sw_w_sta_mac_table - write to the static MAC table
  1474. * @hw: The hardware instance.
  1475. * @addr: The address of the table entry.
  1476. * @mac_addr: The MAC address.
  1477. * @ports: The port members.
  1478. * @override: The flag to override the port receive/transmit settings.
  1479. * @valid: The flag to indicate entry is valid.
  1480. * @use_fid: The flag to indicate the FID is valid.
  1481. * @fid: The FID value.
  1482. *
  1483. * This routine writes an entry of the static MAC table of the switch. It
  1484. * calls sw_w_table_64() to write the data.
  1485. */
  1486. static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
  1487. u8 ports, int override, int valid, int use_fid, u8 fid)
  1488. {
  1489. u32 data_hi;
  1490. u32 data_lo;
  1491. data_lo = ((u32) mac_addr[2] << 24) |
  1492. ((u32) mac_addr[3] << 16) |
  1493. ((u32) mac_addr[4] << 8) | mac_addr[5];
  1494. data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
  1495. data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
  1496. if (override)
  1497. data_hi |= STATIC_MAC_TABLE_OVERRIDE;
  1498. if (use_fid) {
  1499. data_hi |= STATIC_MAC_TABLE_USE_FID;
  1500. data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
  1501. }
  1502. if (valid)
  1503. data_hi |= STATIC_MAC_TABLE_VALID;
  1504. sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
  1505. }
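/*
 * Illustrative example (not taken from this driver's actual setup): an entry
 * that forwards a given MAC address only to the host port, without FID
 * matching, could be written as
 *
 *      sw_w_sta_mac_table(hw, 1, addr, HOST_MASK, 0, 1, 0, 0);
 *
 * where addr points to the 6-byte MAC address and 1 selects one of the
 * STATIC_MAC_TABLE_ENTRIES.
 */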
  1506. /**
  1507. * sw_r_vlan_table - read from the VLAN table
  1508. * @hw: The hardware instance.
  1509. * @addr: The address of the table entry.
  1510. * @vid: Buffer to store the VID.
 * @fid: Buffer to store the FID.
  1512. * @member: Buffer to store the port membership.
  1513. *
  1514. * This function reads an entry of the VLAN table of the switch. It calls
  1515. * sw_r_table() to get the data.
  1516. *
  1517. * Return 0 if the entry is valid; otherwise -1.
  1518. */
  1519. static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
  1520. u8 *member)
  1521. {
  1522. u32 data;
  1523. sw_r_table(hw, TABLE_VLAN, addr, &data);
  1524. if (data & VLAN_TABLE_VALID) {
  1525. *vid = (u16)(data & VLAN_TABLE_VID);
  1526. *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
  1527. *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
  1528. VLAN_TABLE_MEMBERSHIP_SHIFT);
  1529. return 0;
  1530. }
  1531. return -1;
  1532. }
  1533. /**
  1534. * port_r_mib_cnt - read MIB counter
  1535. * @hw: The hardware instance.
  1536. * @port: The port index.
  1537. * @addr: The address of the counter.
  1538. * @cnt: Buffer to store the counter.
  1539. *
  1540. * This routine reads a MIB counter of the port.
  1541. * Hardware interrupts are disabled to minimize corruption of read data.
  1542. */
  1543. static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
  1544. {
  1545. u32 data;
  1546. u16 ctrl_addr;
  1547. uint interrupt;
  1548. int timeout;
  1549. ctrl_addr = addr + PORT_COUNTER_NUM * port;
  1550. interrupt = hw_block_intr(hw);
  1551. ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
  1552. writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
  1553. HW_DELAY(hw, KS884X_IACR_OFFSET);
  1554. for (timeout = 100; timeout > 0; timeout--) {
  1555. data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
  1556. if (data & MIB_COUNTER_VALID) {
  1557. if (data & MIB_COUNTER_OVERFLOW)
  1558. *cnt += MIB_COUNTER_VALUE + 1;
  1559. *cnt += data & MIB_COUNTER_VALUE;
  1560. break;
  1561. }
  1562. }
  1563. hw_restore_intr(hw, interrupt);
  1564. }
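/*
 * When MIB_COUNTER_OVERFLOW is reported the hardware counter has wrapped once
 * since the last read, so one full counter range (MIB_COUNTER_VALUE + 1) is
 * added before the current reading is accumulated into the 64-bit counter.
 */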
  1565. /**
  1566. * port_r_mib_pkt - read dropped packet counts
  1567. * @hw: The hardware instance.
  1568. * @port: The port index.
 * @last: Buffer holding the last read dropped packet count values.
 * @cnt: Buffer to store the receive and transmit dropped packet counts.
  1570. *
  1571. * This routine reads the dropped packet counts of the port.
  1572. * Hardware interrupts are disabled to minimize corruption of read data.
  1573. */
  1574. static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
  1575. {
  1576. u32 cur;
  1577. u32 data;
  1578. u16 ctrl_addr;
  1579. uint interrupt;
  1580. int index;
  1581. index = KS_MIB_PACKET_DROPPED_RX_0 + port;
  1582. do {
  1583. interrupt = hw_block_intr(hw);
  1584. ctrl_addr = (u16) index;
  1585. ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
  1586. << 8);
  1587. writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
  1588. HW_DELAY(hw, KS884X_IACR_OFFSET);
  1589. data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
  1590. hw_restore_intr(hw, interrupt);
  1591. data &= MIB_PACKET_DROPPED;
  1592. cur = *last;
  1593. if (data != cur) {
  1594. *last = data;
  1595. if (data < cur)
  1596. data += MIB_PACKET_DROPPED + 1;
  1597. data -= cur;
  1598. *cnt += data;
  1599. }
  1600. ++last;
  1601. ++cnt;
  1602. index -= KS_MIB_PACKET_DROPPED_TX -
  1603. KS_MIB_PACKET_DROPPED_TX_0 + 1;
  1604. } while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
  1605. }
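/*
 * The dropped packet registers are free running, so only the difference from
 * the previously remembered reading (*last) is accumulated; a smaller current
 * reading is treated as a single wrap of the MIB_PACKET_DROPPED range.
 */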
  1606. /**
  1607. * port_r_cnt - read MIB counters periodically
  1608. * @hw: The hardware instance.
  1609. * @port: The port index.
  1610. *
  1611. * This routine is used to read the counters of the port periodically to avoid
  1612. * counter overflow. The hardware should be acquired first before calling this
  1613. * routine.
  1614. *
 * Return non-zero when not all counters have been read.
  1616. */
  1617. static int port_r_cnt(struct ksz_hw *hw, int port)
  1618. {
  1619. struct ksz_port_mib *mib = &hw->port_mib[port];
  1620. if (mib->mib_start < PORT_COUNTER_NUM)
  1621. while (mib->cnt_ptr < PORT_COUNTER_NUM) {
  1622. port_r_mib_cnt(hw, port, mib->cnt_ptr,
  1623. &mib->counter[mib->cnt_ptr]);
  1624. ++mib->cnt_ptr;
  1625. }
  1626. if (hw->mib_cnt > PORT_COUNTER_NUM)
  1627. port_r_mib_pkt(hw, port, mib->dropped,
  1628. &mib->counter[PORT_COUNTER_NUM]);
  1629. mib->cnt_ptr = 0;
  1630. return 0;
  1631. }
  1632. /**
  1633. * port_init_cnt - initialize MIB counter values
  1634. * @hw: The hardware instance.
  1635. * @port: The port index.
  1636. *
  1637. * This routine is used to initialize all counters to zero if the hardware
  1638. * cannot do it after reset.
  1639. */
  1640. static void port_init_cnt(struct ksz_hw *hw, int port)
  1641. {
  1642. struct ksz_port_mib *mib = &hw->port_mib[port];
  1643. mib->cnt_ptr = 0;
  1644. if (mib->mib_start < PORT_COUNTER_NUM)
  1645. do {
  1646. port_r_mib_cnt(hw, port, mib->cnt_ptr,
  1647. &mib->counter[mib->cnt_ptr]);
  1648. ++mib->cnt_ptr;
  1649. } while (mib->cnt_ptr < PORT_COUNTER_NUM);
  1650. if (hw->mib_cnt > PORT_COUNTER_NUM)
  1651. port_r_mib_pkt(hw, port, mib->dropped,
  1652. &mib->counter[PORT_COUNTER_NUM]);
  1653. memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
  1654. mib->cnt_ptr = 0;
  1655. }
  1656. /*
  1657. * Port functions
  1658. */
  1659. /**
  1660. * port_chk - check port register bits
  1661. * @hw: The hardware instance.
  1662. * @port: The port index.
  1663. * @offset: The offset of the port register.
  1664. * @bits: The data bits to check.
  1665. *
  1666. * This function checks whether the specified bits of the port register are set
  1667. * or not.
  1668. *
  1669. * Return 0 if the bits are not set.
  1670. */
  1671. static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
  1672. {
  1673. u32 addr;
  1674. u16 data;
  1675. PORT_CTRL_ADDR(port, addr);
  1676. addr += offset;
  1677. data = readw(hw->io + addr);
  1678. return (data & bits) == bits;
  1679. }
  1680. /**
  1681. * port_cfg - set port register bits
  1682. * @hw: The hardware instance.
  1683. * @port: The port index.
  1684. * @offset: The offset of the port register.
  1685. * @bits: The data bits to set.
  1686. * @set: The flag indicating whether the bits are to be set or not.
  1687. *
  1688. * This routine sets or resets the specified bits of the port register.
  1689. */
  1690. static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
  1691. int set)
  1692. {
  1693. u32 addr;
  1694. u16 data;
  1695. PORT_CTRL_ADDR(port, addr);
  1696. addr += offset;
  1697. data = readw(hw->io + addr);
  1698. if (set)
  1699. data |= bits;
  1700. else
  1701. data &= ~bits;
  1702. writew(data, hw->io + addr);
  1703. }
  1704. /**
  1705. * port_chk_shift - check port bit
  1706. * @hw: The hardware instance.
  1707. * @port: The port index.
 * @addr: The address of the register.
  1709. * @shift: Number of bits to shift.
  1710. *
  1711. * This function checks whether the specified port is set in the register or
  1712. * not.
  1713. *
  1714. * Return 0 if the port is not set.
  1715. */
  1716. static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
  1717. {
  1718. u16 data;
  1719. u16 bit = 1 << port;
  1720. data = readw(hw->io + addr);
  1721. data >>= shift;
  1722. return (data & bit) == bit;
  1723. }
  1724. /**
  1725. * port_cfg_shift - set port bit
  1726. * @hw: The hardware instance.
  1727. * @port: The port index.
 * @addr: The address of the register.
  1729. * @shift: Number of bits to shift.
  1730. * @set: The flag indicating whether the port is to be set or not.
  1731. *
  1732. * This routine sets or resets the specified port in the register.
  1733. */
  1734. static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
  1735. int set)
  1736. {
  1737. u16 data;
  1738. u16 bits = 1 << port;
  1739. data = readw(hw->io + addr);
  1740. bits <<= shift;
  1741. if (set)
  1742. data |= bits;
  1743. else
  1744. data &= ~bits;
  1745. writew(data, hw->io + addr);
  1746. }
  1747. /**
  1748. * port_r8 - read byte from port register
  1749. * @hw: The hardware instance.
  1750. * @port: The port index.
  1751. * @offset: The offset of the port register.
  1752. * @data: Buffer to store the data.
  1753. *
  1754. * This routine reads a byte from the port register.
  1755. */
  1756. static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
  1757. {
  1758. u32 addr;
  1759. PORT_CTRL_ADDR(port, addr);
  1760. addr += offset;
  1761. *data = readb(hw->io + addr);
  1762. }
  1763. /**
  1764. * port_r16 - read word from port register.
  1765. * @hw: The hardware instance.
  1766. * @port: The port index.
  1767. * @offset: The offset of the port register.
  1768. * @data: Buffer to store the data.
  1769. *
  1770. * This routine reads a word from the port register.
  1771. */
  1772. static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
  1773. {
  1774. u32 addr;
  1775. PORT_CTRL_ADDR(port, addr);
  1776. addr += offset;
  1777. *data = readw(hw->io + addr);
  1778. }
  1779. /**
  1780. * port_w16 - write word to port register.
  1781. * @hw: The hardware instance.
  1782. * @port: The port index.
  1783. * @offset: The offset of the port register.
  1784. * @data: Data to write.
  1785. *
  1786. * This routine writes a word to the port register.
  1787. */
  1788. static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
  1789. {
  1790. u32 addr;
  1791. PORT_CTRL_ADDR(port, addr);
  1792. addr += offset;
  1793. writew(data, hw->io + addr);
  1794. }
  1795. /**
  1796. * sw_chk - check switch register bits
  1797. * @hw: The hardware instance.
  1798. * @addr: The address of the switch register.
  1799. * @bits: The data bits to check.
  1800. *
  1801. * This function checks whether the specified bits of the switch register are
  1802. * set or not.
  1803. *
  1804. * Return 0 if the bits are not set.
  1805. */
  1806. static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
  1807. {
  1808. u16 data;
  1809. data = readw(hw->io + addr);
  1810. return (data & bits) == bits;
  1811. }
  1812. /**
  1813. * sw_cfg - set switch register bits
  1814. * @hw: The hardware instance.
  1815. * @addr: The address of the switch register.
  1816. * @bits: The data bits to set.
  1817. * @set: The flag indicating whether the bits are to be set or not.
  1818. *
  1819. * This function sets or resets the specified bits of the switch register.
  1820. */
  1821. static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
  1822. {
  1823. u16 data;
  1824. data = readw(hw->io + addr);
  1825. if (set)
  1826. data |= bits;
  1827. else
  1828. data &= ~bits;
  1829. writew(data, hw->io + addr);
  1830. }
  1831. /* Bandwidth */
  1832. static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
  1833. {
  1834. port_cfg(hw, p,
  1835. KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
  1836. }
  1837. static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
  1838. {
  1839. return port_chk(hw, p,
  1840. KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
  1841. }
/* The driver sets switch broadcast storm protection at a 10% rate. */
  1843. #define BROADCAST_STORM_PROTECTION_RATE 10
  1844. /* 148,800 frames * 67 ms / 100 */
  1845. #define BROADCAST_STORM_VALUE 9969
  1846. /**
  1847. * sw_cfg_broad_storm - configure broadcast storm threshold
  1848. * @hw: The hardware instance.
  1849. * @percent: Broadcast storm threshold in percent of transmit rate.
  1850. *
  1851. * This routine configures the broadcast storm threshold of the switch.
  1852. */
  1853. static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
  1854. {
  1855. u16 data;
  1856. u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);
  1857. if (value > BROADCAST_STORM_RATE)
  1858. value = BROADCAST_STORM_RATE;
  1859. data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
  1860. data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
  1861. data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
  1862. writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
  1863. }
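/*
 * Worked example: with BROADCAST_STORM_VALUE 9969 (about 148,800 frames/s
 * over the 67 ms interval noted above), percent = 10 programs a limit of
 * 9969 * 10 / 100 = 996 broadcast frames per interval.
 */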
  1864. /**
 * sw_get_broad_storm - get broadcast storm threshold
  1866. * @hw: The hardware instance.
  1867. * @percent: Buffer to store the broadcast storm threshold percentage.
  1868. *
  1869. * This routine retrieves the broadcast storm threshold of the switch.
  1870. */
  1871. static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
  1872. {
  1873. int num;
  1874. u16 data;
  1875. data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
  1876. num = (data & BROADCAST_STORM_RATE_HI);
  1877. num <<= 8;
  1878. num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
  1879. num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE;
  1880. *percent = (u8) num;
  1881. }
  1882. /**
 * sw_dis_broad_storm - disable broadcast storm
  1884. * @hw: The hardware instance.
  1885. * @port: The port index.
  1886. *
  1887. * This routine disables the broadcast storm limit function of the switch.
  1888. */
  1889. static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
  1890. {
  1891. port_cfg_broad_storm(hw, port, 0);
  1892. }
  1893. /**
  1894. * sw_ena_broad_storm - enable broadcast storm
  1895. * @hw: The hardware instance.
  1896. * @port: The port index.
  1897. *
  1898. * This routine enables the broadcast storm limit function of the switch.
  1899. */
  1900. static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
  1901. {
  1902. sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
  1903. port_cfg_broad_storm(hw, port, 1);
  1904. }
  1905. /**
  1906. * sw_init_broad_storm - initialize broadcast storm
  1907. * @hw: The hardware instance.
  1908. *
  1909. * This routine initializes the broadcast storm limit function of the switch.
  1910. */
  1911. static void sw_init_broad_storm(struct ksz_hw *hw)
  1912. {
  1913. int port;
  1914. hw->ksz_switch->broad_per = 1;
  1915. sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
  1916. for (port = 0; port < TOTAL_PORT_NUM; port++)
  1917. sw_dis_broad_storm(hw, port);
  1918. sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
  1919. }
  1920. /**
  1921. * hw_cfg_broad_storm - configure broadcast storm
  1922. * @hw: The hardware instance.
  1923. * @percent: Broadcast storm threshold in percent of transmit rate.
  1924. *
  1925. * This routine configures the broadcast storm threshold of the switch.
  1926. * It is called by user functions. The hardware should be acquired first.
  1927. */
  1928. static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
  1929. {
  1930. if (percent > 100)
  1931. percent = 100;
  1932. sw_cfg_broad_storm(hw, percent);
  1933. sw_get_broad_storm(hw, &percent);
  1934. hw->ksz_switch->broad_per = percent;
  1935. }
  1936. /**
  1937. * sw_dis_prio_rate - disable switch priority rate
  1938. * @hw: The hardware instance.
  1939. * @port: The port index.
  1940. *
  1941. * This routine disables the priority rate function of the switch.
  1942. */
  1943. static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
  1944. {
  1945. u32 addr;
  1946. PORT_CTRL_ADDR(port, addr);
  1947. addr += KS8842_PORT_IN_RATE_OFFSET;
  1948. writel(0, hw->io + addr);
  1949. }
  1950. /**
 * sw_init_prio_rate - initialize switch priority rate
  1952. * @hw: The hardware instance.
  1953. *
  1954. * This routine initializes the priority rate function of the switch.
  1955. */
  1956. static void sw_init_prio_rate(struct ksz_hw *hw)
  1957. {
  1958. int port;
  1959. int prio;
  1960. struct ksz_switch *sw = hw->ksz_switch;
  1961. for (port = 0; port < TOTAL_PORT_NUM; port++) {
  1962. for (prio = 0; prio < PRIO_QUEUES; prio++) {
  1963. sw->port_cfg[port].rx_rate[prio] =
  1964. sw->port_cfg[port].tx_rate[prio] = 0;
  1965. }
  1966. sw_dis_prio_rate(hw, port);
  1967. }
  1968. }
  1969. /* Communication */
  1970. static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
  1971. {
  1972. port_cfg(hw, p,
  1973. KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
  1974. }
  1975. static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
  1976. {
  1977. port_cfg(hw, p,
  1978. KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
  1979. }
  1980. static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
  1981. {
  1982. return port_chk(hw, p,
  1983. KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
  1984. }
  1985. static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
  1986. {
  1987. return port_chk(hw, p,
  1988. KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
  1989. }
  1990. /* Spanning Tree */
  1991. static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
  1992. {
  1993. port_cfg(hw, p,
  1994. KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
  1995. }
  1996. static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
  1997. {
  1998. port_cfg(hw, p,
  1999. KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
  2000. }
  2001. static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
  2002. {
  2003. sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
  2004. }
  2005. static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
  2006. {
  2007. if (!(hw->overrides & FAST_AGING)) {
  2008. sw_cfg_fast_aging(hw, 1);
  2009. mdelay(1);
  2010. sw_cfg_fast_aging(hw, 0);
  2011. }
  2012. }
  2013. /* VLAN */
  2014. static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
  2015. {
  2016. port_cfg(hw, p,
  2017. KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
  2018. }
  2019. static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
  2020. {
  2021. port_cfg(hw, p,
  2022. KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
  2023. }
  2024. static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
  2025. {
  2026. return port_chk(hw, p,
  2027. KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
  2028. }
  2029. static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
  2030. {
  2031. return port_chk(hw, p,
  2032. KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
  2033. }
  2034. static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
  2035. {
  2036. port_cfg(hw, p,
  2037. KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
  2038. }
  2039. static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
  2040. {
  2041. port_cfg(hw, p,
  2042. KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
  2043. }
  2044. static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
  2045. {
  2046. return port_chk(hw, p,
  2047. KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
  2048. }
  2049. static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
  2050. {
  2051. return port_chk(hw, p,
  2052. KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
  2053. }
  2054. /* Mirroring */
  2055. static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
  2056. {
  2057. port_cfg(hw, p,
  2058. KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
  2059. }
  2060. static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
  2061. {
  2062. port_cfg(hw, p,
  2063. KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
  2064. }
  2065. static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
  2066. {
  2067. port_cfg(hw, p,
  2068. KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
  2069. }
  2070. static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
  2071. {
  2072. sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
  2073. }
  2074. static void sw_init_mirror(struct ksz_hw *hw)
  2075. {
  2076. int port;
  2077. for (port = 0; port < TOTAL_PORT_NUM; port++) {
  2078. port_cfg_mirror_sniffer(hw, port, 0);
  2079. port_cfg_mirror_rx(hw, port, 0);
  2080. port_cfg_mirror_tx(hw, port, 0);
  2081. }
  2082. sw_cfg_mirror_rx_tx(hw, 0);
  2083. }
  2084. static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
  2085. {
  2086. sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
  2087. SWITCH_UNK_DEF_PORT_ENABLE, set);
  2088. }
  2089. static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
  2090. {
  2091. return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
  2092. SWITCH_UNK_DEF_PORT_ENABLE);
  2093. }
  2094. static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
  2095. {
  2096. port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
  2097. }
  2098. static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
  2099. {
  2100. return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
  2101. }
  2102. /* Priority */
  2103. static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
  2104. {
  2105. port_cfg(hw, p,
  2106. KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
  2107. }
  2108. static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
  2109. {
  2110. port_cfg(hw, p,
  2111. KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
  2112. }
  2113. static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
  2114. {
  2115. port_cfg(hw, p,
  2116. KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
  2117. }
  2118. static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
  2119. {
  2120. port_cfg(hw, p,
  2121. KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
  2122. }
  2123. static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
  2124. {
  2125. return port_chk(hw, p,
  2126. KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
  2127. }
  2128. static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
  2129. {
  2130. return port_chk(hw, p,
  2131. KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
  2132. }
  2133. static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
  2134. {
  2135. return port_chk(hw, p,
  2136. KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
  2137. }
  2138. static inline int port_chk_prio(struct ksz_hw *hw, int p)
  2139. {
  2140. return port_chk(hw, p,
  2141. KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
  2142. }
  2143. /**
  2144. * sw_dis_diffserv - disable switch DiffServ priority
  2145. * @hw: The hardware instance.
  2146. * @port: The port index.
  2147. *
  2148. * This routine disables the DiffServ priority function of the switch.
  2149. */
  2150. static void sw_dis_diffserv(struct ksz_hw *hw, int port)
  2151. {
  2152. port_cfg_diffserv(hw, port, 0);
  2153. }
  2154. /**
  2155. * sw_dis_802_1p - disable switch 802.1p priority
  2156. * @hw: The hardware instance.
  2157. * @port: The port index.
  2158. *
  2159. * This routine disables the 802.1p priority function of the switch.
  2160. */
  2161. static void sw_dis_802_1p(struct ksz_hw *hw, int port)
  2162. {
  2163. port_cfg_802_1p(hw, port, 0);
  2164. }
  2165. /**
 * sw_cfg_replace_null_vid - configure switch null VID replacement
 * @hw: The hardware instance.
 * @set: The flag to disable or enable.
 *
 * This routine enables or disables replacement of a null VID with the port's
 * default VID in the switch.
 */
  2171. static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
  2172. {
  2173. sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
  2174. }
  2175. /**
 * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
  2177. * @hw: The hardware instance.
  2178. * @port: The port index.
  2179. * @set: The flag to disable or enable.
  2180. *
 * This routine enables the 802.1p priority re-mapping function of the switch.
 * It allows the 802.1p priority field to be replaced with the port's default
 * tag priority value when the ingress packet's 802.1p priority is higher than
 * the port's default tag priority.
  2185. */
  2186. static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
  2187. {
  2188. port_cfg_replace_vid(hw, port, set);
  2189. }
  2190. /**
  2191. * sw_cfg_port_based - configure switch port based priority
  2192. * @hw: The hardware instance.
  2193. * @port: The port index.
  2194. * @prio: The priority to set.
  2195. *
  2196. * This routine configures the port based priority of the switch.
  2197. */
  2198. static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
  2199. {
  2200. u16 data;
  2201. if (prio > PORT_BASED_PRIORITY_BASE)
  2202. prio = PORT_BASED_PRIORITY_BASE;
  2203. hw->ksz_switch->port_cfg[port].port_prio = prio;
  2204. port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
  2205. data &= ~PORT_BASED_PRIORITY_MASK;
  2206. data |= prio << PORT_BASED_PRIORITY_SHIFT;
  2207. port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
  2208. }
  2209. /**
  2210. * sw_dis_multi_queue - disable transmit multiple queues
  2211. * @hw: The hardware instance.
  2212. * @port: The port index.
  2213. *
 * This routine disables the transmit multiple queue selection of the switch
 * port, leaving only a single transmit queue on the port.
  2216. */
  2217. static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
  2218. {
  2219. port_cfg_prio(hw, port, 0);
  2220. }
  2221. /**
  2222. * sw_init_prio - initialize switch priority
  2223. * @hw: The hardware instance.
  2224. *
  2225. * This routine initializes the switch QoS priority functions.
  2226. */
  2227. static void sw_init_prio(struct ksz_hw *hw)
  2228. {
  2229. int port;
  2230. int tos;
  2231. struct ksz_switch *sw = hw->ksz_switch;
  2232. /*
  2233. * Init all the 802.1p tag priority value to be assigned to different
  2234. * priority queue.
  2235. */
  2236. sw->p_802_1p[0] = 0;
  2237. sw->p_802_1p[1] = 0;
  2238. sw->p_802_1p[2] = 1;
  2239. sw->p_802_1p[3] = 1;
  2240. sw->p_802_1p[4] = 2;
  2241. sw->p_802_1p[5] = 2;
  2242. sw->p_802_1p[6] = 3;
  2243. sw->p_802_1p[7] = 3;
  2244. /*
  2245. * Init all the DiffServ priority value to be assigned to priority
  2246. * queue 0.
  2247. */
  2248. for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
  2249. sw->diffserv[tos] = 0;
  2250. /* All QoS functions disabled. */
  2251. for (port = 0; port < TOTAL_PORT_NUM; port++) {
  2252. sw_dis_multi_queue(hw, port);
  2253. sw_dis_diffserv(hw, port);
  2254. sw_dis_802_1p(hw, port);
  2255. sw_cfg_replace_vid(hw, port, 0);
  2256. sw->port_cfg[port].port_prio = 0;
  2257. sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
  2258. }
  2259. sw_cfg_replace_null_vid(hw, 0);
  2260. }
  2261. /**
  2262. * port_get_def_vid - get port default VID.
  2263. * @hw: The hardware instance.
  2264. * @port: The port index.
  2265. * @vid: Buffer to store the VID.
  2266. *
  2267. * This routine retrieves the default VID of the port.
  2268. */
  2269. static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
  2270. {
  2271. u32 addr;
  2272. PORT_CTRL_ADDR(port, addr);
  2273. addr += KS8842_PORT_CTRL_VID_OFFSET;
  2274. *vid = readw(hw->io + addr);
  2275. }
  2276. /**
  2277. * sw_init_vlan - initialize switch VLAN
  2278. * @hw: The hardware instance.
  2279. *
  2280. * This routine initializes the VLAN function of the switch.
  2281. */
  2282. static void sw_init_vlan(struct ksz_hw *hw)
  2283. {
  2284. int port;
  2285. int entry;
  2286. struct ksz_switch *sw = hw->ksz_switch;
  2287. /* Read 16 VLAN entries from device's VLAN table. */
  2288. for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
  2289. sw_r_vlan_table(hw, entry,
  2290. &sw->vlan_table[entry].vid,
  2291. &sw->vlan_table[entry].fid,
  2292. &sw->vlan_table[entry].member);
  2293. }
  2294. for (port = 0; port < TOTAL_PORT_NUM; port++) {
  2295. port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
  2296. sw->port_cfg[port].member = PORT_MASK;
  2297. }
  2298. }
  2299. /**
  2300. * sw_cfg_port_base_vlan - configure port-based VLAN membership
  2301. * @hw: The hardware instance.
  2302. * @port: The port index.
  2303. * @member: The port-based VLAN membership.
  2304. *
  2305. * This routine configures the port-based VLAN membership of the port.
  2306. */
  2307. static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
  2308. {
  2309. u32 addr;
  2310. u8 data;
  2311. PORT_CTRL_ADDR(port, addr);
  2312. addr += KS8842_PORT_CTRL_2_OFFSET;
  2313. data = readb(hw->io + addr);
  2314. data &= ~PORT_VLAN_MEMBERSHIP;
  2315. data |= (member & PORT_MASK);
  2316. writeb(data, hw->io + addr);
  2317. hw->ksz_switch->port_cfg[port].member = member;
  2318. }
  2319. /**
  2320. * sw_get_addr - get the switch MAC address.
  2321. * @hw: The hardware instance.
  2322. * @mac_addr: Buffer to store the MAC address.
  2323. *
  2324. * This function retrieves the MAC address of the switch.
  2325. */
  2326. static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
  2327. {
  2328. int i;
  2329. for (i = 0; i < 6; i += 2) {
  2330. mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
  2331. mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
  2332. }
  2333. }
  2334. /**
  2335. * sw_set_addr - configure switch MAC address
  2336. * @hw: The hardware instance.
  2337. * @mac_addr: The MAC address.
  2338. *
  2339. * This function configures the MAC address of the switch.
  2340. */
  2341. static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
  2342. {
  2343. int i;
  2344. for (i = 0; i < 6; i += 2) {
  2345. writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
  2346. writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
  2347. }
  2348. }
  2349. /**
  2350. * sw_set_global_ctrl - set switch global control
  2351. * @hw: The hardware instance.
  2352. *
  2353. * This routine sets the global control of the switch function.
  2354. */
  2355. static void sw_set_global_ctrl(struct ksz_hw *hw)
  2356. {
  2357. u16 data;
  2358. /* Enable switch MII flow control. */
  2359. data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
  2360. data |= SWITCH_FLOW_CTRL;
  2361. writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
  2362. data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
  2363. /* Enable aggressive back off algorithm in half duplex mode. */
  2364. data |= SWITCH_AGGR_BACKOFF;
  2365. /* Enable automatic fast aging when link changed detected. */
  2366. data |= SWITCH_AGING_ENABLE;
  2367. data |= SWITCH_LINK_AUTO_AGING;
  2368. if (hw->overrides & FAST_AGING)
  2369. data |= SWITCH_FAST_AGING;
  2370. else
  2371. data &= ~SWITCH_FAST_AGING;
  2372. writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
  2373. data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
  2374. /* Enable no excessive collision drop. */
  2375. data |= NO_EXC_COLLISION_DROP;
  2376. writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
  2377. }
  2378. enum {
  2379. STP_STATE_DISABLED = 0,
  2380. STP_STATE_LISTENING,
  2381. STP_STATE_LEARNING,
  2382. STP_STATE_FORWARDING,
  2383. STP_STATE_BLOCKED,
  2384. STP_STATE_SIMPLE
  2385. };
  2386. /**
  2387. * port_set_stp_state - configure port spanning tree state
  2388. * @hw: The hardware instance.
  2389. * @port: The port index.
  2390. * @state: The spanning tree state.
  2391. *
  2392. * This routine configures the spanning tree state of the port.
  2393. */
  2394. static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
  2395. {
  2396. u16 data;
  2397. port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
  2398. switch (state) {
  2399. case STP_STATE_DISABLED:
  2400. data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
  2401. data |= PORT_LEARN_DISABLE;
  2402. break;
  2403. case STP_STATE_LISTENING:
  2404. /*
  2405. * No need to turn on transmit because of port direct mode.
  2406. * Turning on receive is required if the static MAC table is not set up.
  2407. */
  2408. data &= ~PORT_TX_ENABLE;
  2409. data |= PORT_RX_ENABLE;
  2410. data |= PORT_LEARN_DISABLE;
  2411. break;
  2412. case STP_STATE_LEARNING:
  2413. data &= ~PORT_TX_ENABLE;
  2414. data |= PORT_RX_ENABLE;
  2415. data &= ~PORT_LEARN_DISABLE;
  2416. break;
  2417. case STP_STATE_FORWARDING:
  2418. data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
  2419. data &= ~PORT_LEARN_DISABLE;
  2420. break;
  2421. case STP_STATE_BLOCKED:
  2422. /*
  2423. * The static MAC table needs to be set up with override to keep receiving
  2424. * BPDU messages. See the sw_init_stp routine.
  2425. */
  2426. data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
  2427. data |= PORT_LEARN_DISABLE;
  2428. break;
  2429. case STP_STATE_SIMPLE:
  2430. data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
  2431. data |= PORT_LEARN_DISABLE;
  2432. break;
  2433. }
  2434. port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
  2435. hw->ksz_switch->port_cfg[port].stp_state = state;
  2436. }
  2437. #define STP_ENTRY 0
  2438. #define BROADCAST_ENTRY 1
  2439. #define BRIDGE_ADDR_ENTRY 2
  2440. #define IPV6_ADDR_ENTRY 3
  2441. /**
  2442. * sw_clr_sta_mac_table - clear static MAC table
  2443. * @hw: The hardware instance.
  2444. *
  2445. * This routine clears the static MAC table.
  2446. */
  2447. static void sw_clr_sta_mac_table(struct ksz_hw *hw)
  2448. {
  2449. struct ksz_mac_table *entry;
  2450. int i;
  2451. for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
  2452. entry = &hw->ksz_switch->mac_table[i];
  2453. sw_w_sta_mac_table(hw, i,
  2454. entry->mac_addr, entry->ports,
  2455. entry->override, 0,
  2456. entry->use_fid, entry->fid);
  2457. }
  2458. }
  2459. /**
  2460. * sw_init_stp - initialize switch spanning tree support
  2461. * @hw: The hardware instance.
  2462. *
  2463. * This routine initializes the spanning tree support of the switch.
  2464. */
  2465. static void sw_init_stp(struct ksz_hw *hw)
  2466. {
  2467. struct ksz_mac_table *entry;
  2468. entry = &hw->ksz_switch->mac_table[STP_ENTRY];
  2469. entry->mac_addr[0] = 0x01;
  2470. entry->mac_addr[1] = 0x80;
  2471. entry->mac_addr[2] = 0xC2;
  2472. entry->mac_addr[3] = 0x00;
  2473. entry->mac_addr[4] = 0x00;
  2474. entry->mac_addr[5] = 0x00;
  2475. entry->ports = HOST_MASK;
  2476. entry->override = 1;
  2477. entry->valid = 1;
  2478. sw_w_sta_mac_table(hw, STP_ENTRY,
  2479. entry->mac_addr, entry->ports,
  2480. entry->override, entry->valid,
  2481. entry->use_fid, entry->fid);
  2482. }
  2483. /**
  2484. * sw_block_addr - block certain packets from the host port
  2485. * @hw: The hardware instance.
  2486. *
  2487. * This routine blocks certain packets from reaching the host port.
  2488. */
  2489. static void sw_block_addr(struct ksz_hw *hw)
  2490. {
  2491. struct ksz_mac_table *entry;
  2492. int i;
  2493. for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
  2494. entry = &hw->ksz_switch->mac_table[i];
  2495. entry->valid = 0;
  2496. sw_w_sta_mac_table(hw, i,
  2497. entry->mac_addr, entry->ports,
  2498. entry->override, entry->valid,
  2499. entry->use_fid, entry->fid);
  2500. }
  2501. }
  2502. #define PHY_LINK_SUPPORT \
  2503. (PHY_AUTO_NEG_ASYM_PAUSE | \
  2504. PHY_AUTO_NEG_SYM_PAUSE | \
  2505. PHY_AUTO_NEG_100BT4 | \
  2506. PHY_AUTO_NEG_100BTX_FD | \
  2507. PHY_AUTO_NEG_100BTX | \
  2508. PHY_AUTO_NEG_10BT_FD | \
  2509. PHY_AUTO_NEG_10BT)
  2510. static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
  2511. {
  2512. *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
  2513. }
  2514. static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
  2515. {
  2516. writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
  2517. }
  2518. static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
  2519. {
  2520. *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
  2521. }
  2522. static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
  2523. {
  2524. *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
  2525. }
  2526. static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
  2527. {
  2528. writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
  2529. }
  2530. static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
  2531. {
  2532. *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
  2533. }
  2534. static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
  2535. {
  2536. *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
  2537. }
  2538. static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
  2539. {
  2540. writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
  2541. }
  2542. static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
  2543. {
  2544. *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
  2545. }
  2546. static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
  2547. {
  2548. writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
  2549. }
  2550. static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
  2551. {
  2552. *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
  2553. }
  2554. static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
  2555. {
  2556. writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
  2557. }
  2558. /**
  2559. * hw_r_phy - read data from PHY register
  2560. * @hw: The hardware instance.
  2561. * @port: Port to read.
  2562. * @reg: PHY register to read.
  2563. * @val: Buffer to store the read data.
  2564. *
  2565. * This routine reads data from the PHY register.
  2566. */
  2567. static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
  2568. {
  2569. int phy;
  2570. phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
  2571. *val = readw(hw->io + phy);
  2572. }
  2573. /**
  2574. * hw_w_phy - write data to PHY register
  2575. * @hw: The hardware instance.
  2576. * @port: Port to write.
  2577. * @reg: PHY register to write.
  2578. * @val: Word data to write.
  2579. *
  2580. * This routine writes data to the PHY register.
  2581. */
  2582. static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
  2583. {
  2584. int phy;
  2585. phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
  2586. writew(val, hw->io + phy);
  2587. }
  2588. /*
  2589. * EEPROM access functions
  2590. */
  2591. #define AT93C_CODE 0
  2592. #define AT93C_WR_OFF 0x00
  2593. #define AT93C_WR_ALL 0x10
  2594. #define AT93C_ER_ALL 0x20
  2595. #define AT93C_WR_ON 0x30
  2596. #define AT93C_WRITE 1
  2597. #define AT93C_READ 2
  2598. #define AT93C_ERASE 3
  2599. #define EEPROM_DELAY 4
  2600. static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
  2601. {
  2602. u16 data;
  2603. data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
  2604. data &= ~gpio;
  2605. writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
  2606. }
  2607. static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
  2608. {
  2609. u16 data;
  2610. data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
  2611. data |= gpio;
  2612. writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
  2613. }
  2614. static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
  2615. {
  2616. u16 data;
  2617. data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
  2618. return (u8)(data & gpio);
  2619. }
  2620. static void eeprom_clk(struct ksz_hw *hw)
  2621. {
  2622. raise_gpio(hw, EEPROM_SERIAL_CLOCK);
  2623. udelay(EEPROM_DELAY);
  2624. drop_gpio(hw, EEPROM_SERIAL_CLOCK);
  2625. udelay(EEPROM_DELAY);
  2626. }
  2627. static u16 spi_r(struct ksz_hw *hw)
  2628. {
  2629. int i;
  2630. u16 temp = 0;
  2631. for (i = 15; i >= 0; i--) {
  2632. raise_gpio(hw, EEPROM_SERIAL_CLOCK);
  2633. udelay(EEPROM_DELAY);
  2634. temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
  2635. drop_gpio(hw, EEPROM_SERIAL_CLOCK);
  2636. udelay(EEPROM_DELAY);
  2637. }
  2638. return temp;
  2639. }
  2640. static void spi_w(struct ksz_hw *hw, u16 data)
  2641. {
  2642. int i;
  2643. for (i = 15; i >= 0; i--) {
  2644. (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
  2645. drop_gpio(hw, EEPROM_DATA_OUT);
  2646. eeprom_clk(hw);
  2647. }
  2648. }
  2649. static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
  2650. {
  2651. int i;
  2652. /* Initial start bit */
  2653. raise_gpio(hw, EEPROM_DATA_OUT);
  2654. eeprom_clk(hw);
  2655. /* AT93C operation */
  2656. for (i = 1; i >= 0; i--) {
  2657. (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
  2658. drop_gpio(hw, EEPROM_DATA_OUT);
  2659. eeprom_clk(hw);
  2660. }
  2661. /* Address location */
  2662. for (i = 5; i >= 0; i--) {
  2663. (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
  2664. drop_gpio(hw, EEPROM_DATA_OUT);
  2665. eeprom_clk(hw);
  2666. }
  2667. }
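/*
 * Worked example (illustration only): eeprom_read(hw, 5) below ends up
 * calling spi_reg(hw, AT93C_READ, 5), which shifts out, MSB first,
 *
 *	1			start bit
 *	1 0			AT93C_READ opcode
 *	0 0 0 1 0 1		6-bit word address (5)
 *
 * after which spi_r() clocks the 16-bit data word back in.
 */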
  2668. #define EEPROM_DATA_RESERVED 0
  2669. #define EEPROM_DATA_MAC_ADDR_0 1
  2670. #define EEPROM_DATA_MAC_ADDR_1 2
  2671. #define EEPROM_DATA_MAC_ADDR_2 3
  2672. #define EEPROM_DATA_SUBSYS_ID 4
  2673. #define EEPROM_DATA_SUBSYS_VEN_ID 5
  2674. #define EEPROM_DATA_PM_CAP 6
  2675. /* User defined EEPROM data */
  2676. #define EEPROM_DATA_OTHER_MAC_ADDR 9
  2677. /**
  2678. * eeprom_read - read from AT93C46 EEPROM
  2679. * @hw: The hardware instance.
  2680. * @reg: The register offset.
  2681. *
  2682. * This function reads a word from the AT93C46 EEPROM.
  2683. *
  2684. * Return the data value.
  2685. */
  2686. static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
  2687. {
  2688. u16 data;
  2689. raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
  2690. spi_reg(hw, AT93C_READ, reg);
  2691. data = spi_r(hw);
  2692. drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
  2693. return data;
  2694. }
  2695. /**
  2696. * eeprom_write - write to AT93C46 EEPROM
  2697. * @hw: The hardware instance.
  2698. * @reg: The register offset.
  2699. * @data: The data value.
  2700. *
  2701. * This procedure writes a word to the AT93C46 EEPROM.
  2702. */
  2703. static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
  2704. {
  2705. int timeout;
  2706. raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
  2707. /* Enable write. */
  2708. spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
  2709. drop_gpio(hw, EEPROM_CHIP_SELECT);
  2710. udelay(1);
  2711. /* Erase the register. */
  2712. raise_gpio(hw, EEPROM_CHIP_SELECT);
  2713. spi_reg(hw, AT93C_ERASE, reg);
  2714. drop_gpio(hw, EEPROM_CHIP_SELECT);
  2715. udelay(1);
  2716. /* Check operation complete. */
  2717. raise_gpio(hw, EEPROM_CHIP_SELECT);
  2718. timeout = 8;
  2719. mdelay(2);
  2720. do {
  2721. mdelay(1);
  2722. } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
  2723. drop_gpio(hw, EEPROM_CHIP_SELECT);
  2724. udelay(1);
  2725. /* Write the register. */
  2726. raise_gpio(hw, EEPROM_CHIP_SELECT);
  2727. spi_reg(hw, AT93C_WRITE, reg);
  2728. spi_w(hw, data);
  2729. drop_gpio(hw, EEPROM_CHIP_SELECT);
  2730. udelay(1);
  2731. /* Check operation complete. */
  2732. raise_gpio(hw, EEPROM_CHIP_SELECT);
  2733. timeout = 8;
  2734. mdelay(2);
  2735. do {
  2736. mdelay(1);
  2737. } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
  2738. drop_gpio(hw, EEPROM_CHIP_SELECT);
  2739. udelay(1);
  2740. /* Disable write. */
  2741. raise_gpio(hw, EEPROM_CHIP_SELECT);
  2742. spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
  2743. drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
  2744. }
  2745. /*
  2746. * Link detection routines
  2747. */
  2748. static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
  2749. {
  2750. ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
  2751. switch (port->flow_ctrl) {
  2752. case PHY_FLOW_CTRL:
  2753. ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
  2754. break;
  2755. /* Not supported. */
  2756. case PHY_TX_ONLY:
  2757. case PHY_RX_ONLY:
  2758. default:
  2759. break;
  2760. }
  2761. return ctrl;
  2762. }
  2763. static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
  2764. {
  2765. u32 rx_cfg;
  2766. u32 tx_cfg;
  2767. rx_cfg = hw->rx_cfg;
  2768. tx_cfg = hw->tx_cfg;
  2769. if (rx)
  2770. hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
  2771. else
  2772. hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
  2773. if (tx)
  2774. hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
  2775. else
  2776. hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
  2777. if (hw->enabled) {
  2778. if (rx_cfg != hw->rx_cfg)
  2779. writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
  2780. if (tx_cfg != hw->tx_cfg)
  2781. writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
  2782. }
  2783. }
  2784. static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
  2785. u16 local, u16 remote)
  2786. {
  2787. int rx;
  2788. int tx;
  2789. if (hw->overrides & PAUSE_FLOW_CTRL)
  2790. return;
  2791. rx = tx = 0;
  2792. if (port->force_link)
  2793. rx = tx = 1;
  2794. if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
  2795. if (local & PHY_AUTO_NEG_SYM_PAUSE) {
  2796. rx = tx = 1;
  2797. } else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
  2798. (local & PHY_AUTO_NEG_PAUSE) ==
  2799. PHY_AUTO_NEG_ASYM_PAUSE) {
  2800. tx = 1;
  2801. }
  2802. } else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
  2803. if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
  2804. rx = 1;
  2805. }
  2806. if (!hw->ksz_switch)
  2807. set_flow_ctrl(hw, rx, tx);
  2808. }
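/*
 * Summary of the resolution performed above (illustration only), following
 * the pause negotiation rules as implemented here:
 *
 *	remote SYM, local SYM			-> rx and tx flow control
 *	remote SYM + ASYM, local ASYM only	-> tx flow control only
 *	remote ASYM only, local SYM + ASYM	-> rx flow control only
 *	forced link				-> rx and tx flow control
 */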
  2809. static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
  2810. struct ksz_port_info *info, u16 link_status)
  2811. {
  2812. if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
  2813. !(hw->overrides & PAUSE_FLOW_CTRL)) {
  2814. u32 cfg = hw->tx_cfg;
  2815. /* Disable flow control in the half duplex mode. */
  2816. if (1 == info->duplex)
  2817. hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
  2818. if (hw->enabled && cfg != hw->tx_cfg)
  2819. writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
  2820. }
  2821. }
  2822. /**
  2823. * port_get_link_speed - get current link status
  2824. * @port: The port instance.
  2825. *
  2826. * This routine reads PHY registers to determine the current link status of the
  2827. * switch ports.
  2828. */
  2829. static void port_get_link_speed(struct ksz_port *port)
  2830. {
  2831. uint interrupt;
  2832. struct ksz_port_info *info;
  2833. struct ksz_port_info *linked = NULL;
  2834. struct ksz_hw *hw = port->hw;
  2835. u16 data;
  2836. u16 status;
  2837. u8 local;
  2838. u8 remote;
  2839. int i;
  2840. int p;
  2841. int change = 0;
  2842. interrupt = hw_block_intr(hw);
  2843. for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
  2844. info = &hw->port_info[p];
  2845. port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
  2846. port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
  2847. /*
  2848. * Link status is changing all the time even when there is no
  2849. * cable connection!
  2850. */
  2851. remote = status & (PORT_AUTO_NEG_COMPLETE |
  2852. PORT_STATUS_LINK_GOOD);
  2853. local = (u8) data;
  2854. /* No change to status. */
  2855. if (local == info->advertised && remote == info->partner)
  2856. continue;
  2857. info->advertised = local;
  2858. info->partner = remote;
  2859. if (status & PORT_STATUS_LINK_GOOD) {
  2860. /* Remember the first linked port. */
  2861. if (!linked)
  2862. linked = info;
  2863. info->tx_rate = 10 * TX_RATE_UNIT;
  2864. if (status & PORT_STATUS_SPEED_100MBIT)
  2865. info->tx_rate = 100 * TX_RATE_UNIT;
  2866. info->duplex = 1;
  2867. if (status & PORT_STATUS_FULL_DUPLEX)
  2868. info->duplex = 2;
  2869. if (media_connected != info->state) {
  2870. hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
  2871. &data);
  2872. hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
  2873. &status);
  2874. determine_flow_ctrl(hw, port, data, status);
  2875. if (hw->ksz_switch) {
  2876. port_cfg_back_pressure(hw, p,
  2877. (1 == info->duplex));
  2878. }
  2879. change |= 1 << i;
  2880. port_cfg_change(hw, port, info, status);
  2881. }
  2882. info->state = media_connected;
  2883. } else {
  2884. if (media_disconnected != info->state) {
  2885. change |= 1 << i;
  2886. /* Indicate that the link just went down. */
  2887. hw->port_mib[p].link_down = 1;
  2888. }
  2889. info->state = media_disconnected;
  2890. }
  2891. hw->port_mib[p].state = (u8) info->state;
  2892. }
  2893. if (linked && media_disconnected == port->linked->state)
  2894. port->linked = linked;
  2895. hw_restore_intr(hw, interrupt);
  2896. }
  2897. #define PHY_RESET_TIMEOUT 10
  2898. /**
  2899. * port_set_link_speed - set port speed
  2900. * @port: The port instance.
  2901. *
  2902. * This routine sets the link speed of the switch ports.
  2903. */
  2904. static void port_set_link_speed(struct ksz_port *port)
  2905. {
  2906. struct ksz_hw *hw = port->hw;
  2907. u16 data;
  2908. u16 cfg;
  2909. u8 status;
  2910. int i;
  2911. int p;
  2912. for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
  2913. port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
  2914. port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
  2915. cfg = 0;
  2916. if (status & PORT_STATUS_LINK_GOOD)
  2917. cfg = data;
  2918. data |= PORT_AUTO_NEG_ENABLE;
  2919. data = advertised_flow_ctrl(port, data);
  2920. data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
  2921. PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;
  2922. /* Check if manual configuration is specified by the user. */
  2923. if (port->speed || port->duplex) {
  2924. if (10 == port->speed)
  2925. data &= ~(PORT_AUTO_NEG_100BTX_FD |
  2926. PORT_AUTO_NEG_100BTX);
  2927. else if (100 == port->speed)
  2928. data &= ~(PORT_AUTO_NEG_10BT_FD |
  2929. PORT_AUTO_NEG_10BT);
  2930. if (1 == port->duplex)
  2931. data &= ~(PORT_AUTO_NEG_100BTX_FD |
  2932. PORT_AUTO_NEG_10BT_FD);
  2933. else if (2 == port->duplex)
  2934. data &= ~(PORT_AUTO_NEG_100BTX |
  2935. PORT_AUTO_NEG_10BT);
  2936. }
  2937. if (data != cfg) {
  2938. data |= PORT_AUTO_NEG_RESTART;
  2939. port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
  2940. }
  2941. }
  2942. }
  2943. /**
  2944. * port_force_link_speed - force port speed
  2945. * @port: The port instance.
  2946. *
  2947. * This routine forces the link speed of the switch ports.
  2948. */
  2949. static void port_force_link_speed(struct ksz_port *port)
  2950. {
  2951. struct ksz_hw *hw = port->hw;
  2952. u16 data;
  2953. int i;
  2954. int phy;
  2955. int p;
  2956. for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
  2957. phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
  2958. hw_r_phy_ctrl(hw, phy, &data);
  2959. data &= ~PHY_AUTO_NEG_ENABLE;
  2960. if (10 == port->speed)
  2961. data &= ~PHY_SPEED_100MBIT;
  2962. else if (100 == port->speed)
  2963. data |= PHY_SPEED_100MBIT;
  2964. if (1 == port->duplex)
  2965. data &= ~PHY_FULL_DUPLEX;
  2966. else if (2 == port->duplex)
  2967. data |= PHY_FULL_DUPLEX;
  2968. hw_w_phy_ctrl(hw, phy, data);
  2969. }
  2970. }
  2971. static void port_set_power_saving(struct ksz_port *port, int enable)
  2972. {
  2973. struct ksz_hw *hw = port->hw;
  2974. int i;
  2975. int p;
  2976. for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
  2977. port_cfg(hw, p,
  2978. KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
  2979. }
  2980. /*
  2981. * KSZ8841 power management functions
  2982. */
  2983. /**
  2984. * hw_chk_wol_pme_status - check PMEN pin
  2985. * @hw: The hardware instance.
  2986. *
  2987. * This function checks whether the PMEN pin is asserted.
  2988. *
  2989. * Return 1 if the PMEN pin is asserted; otherwise, 0.
  2990. */
  2991. static int hw_chk_wol_pme_status(struct ksz_hw *hw)
  2992. {
  2993. struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
  2994. struct pci_dev *pdev = hw_priv->pdev;
  2995. u16 data;
  2996. if (!pdev->pm_cap)
  2997. return 0;
  2998. pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
  2999. return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
  3000. }
  3001. /**
  3002. * hw_clr_wol_pme_status - clear PMEN pin
  3003. * @hw: The hardware instance.
  3004. *
  3005. * This routine is used to clear PME_Status to deassert PMEN pin.
  3006. */
  3007. static void hw_clr_wol_pme_status(struct ksz_hw *hw)
  3008. {
  3009. struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
  3010. struct pci_dev *pdev = hw_priv->pdev;
  3011. u16 data;
  3012. if (!pdev->pm_cap)
  3013. return;
  3014. /* Clear PME_Status to deassert PMEN pin. */
  3015. pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
  3016. data |= PCI_PM_CTRL_PME_STATUS;
  3017. pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
  3018. }
  3019. /**
  3020. * hw_cfg_wol_pme - enable or disable Wake-on-LAN
  3021. * @hw: The hardware instance.
  3022. * @set: The flag indicating whether to enable or disable.
  3023. *
  3024. * This routine is used to enable or disable Wake-on-LAN.
  3025. */
  3026. static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
  3027. {
  3028. struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
  3029. struct pci_dev *pdev = hw_priv->pdev;
  3030. u16 data;
  3031. if (!pdev->pm_cap)
  3032. return;
  3033. pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
  3034. data &= ~PCI_PM_CTRL_STATE_MASK;
  3035. if (set)
  3036. data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
  3037. else
  3038. data &= ~PCI_PM_CTRL_PME_ENABLE;
  3039. pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
  3040. }
  3041. /**
  3042. * hw_cfg_wol - configure Wake-on-LAN features
  3043. * @hw: The hardware instance.
  3044. * @frame: The pattern frame bit.
  3045. * @set: The flag indicating whether to enable or disable.
  3046. *
  3047. * This routine is used to enable or disable certain Wake-on-LAN features.
  3048. */
  3049. static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
  3050. {
  3051. u16 data;
  3052. data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
  3053. if (set)
  3054. data |= frame;
  3055. else
  3056. data &= ~frame;
  3057. writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
  3058. }
  3059. /**
  3060. * hw_set_wol_frame - program Wake-on-LAN pattern
  3061. * @hw: The hardware instance.
  3062. * @i: The frame index.
  3063. * @mask_size: The size of the mask.
  3064. * @mask: Mask to ignore certain bytes in the pattern.
  3065. * @frame_size: The size of the frame.
  3066. * @pattern: The frame data.
  3067. *
  3068. * This routine is used to program Wake-on-LAN pattern.
  3069. */
  3070. static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
  3071. const u8 *mask, uint frame_size, const u8 *pattern)
  3072. {
  3073. int bits;
  3074. int from;
  3075. int len;
  3076. int to;
  3077. u32 crc;
  3078. u8 data[64];
  3079. u8 val = 0;
  3080. if (frame_size > mask_size * 8)
  3081. frame_size = mask_size * 8;
  3082. if (frame_size > 64)
  3083. frame_size = 64;
  3084. i *= 0x10;
  3085. writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
  3086. writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);
  3087. bits = len = from = to = 0;
  3088. do {
  3089. if (bits) {
  3090. if ((val & 1))
  3091. data[to++] = pattern[from];
  3092. val >>= 1;
  3093. ++from;
  3094. --bits;
  3095. } else {
  3096. val = mask[len];
  3097. writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
  3098. + len);
  3099. ++len;
  3100. if (val)
  3101. bits = 8;
  3102. else
  3103. from += 8;
  3104. }
  3105. } while (from < (int) frame_size);
  3106. if (val) {
  3107. bits = mask[len - 1];
  3108. val <<= (from % 8);
  3109. bits &= ~val;
  3110. writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
  3111. 1);
  3112. }
  3113. crc = ether_crc(to, data);
  3114. writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
  3115. }
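/*
 * Worked example (illustration only): in hw_add_wol_bcast() below the mask is
 * { 0x3F }, i.e. bits 0-5 set, so only the first six bytes of the pattern
 * (the broadcast MAC address) take part in the match, and the CRC written to
 * KS8841_WOL_FRAME_CRC_OFFSET is computed over exactly those six bytes.
 */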
  3116. /**
  3117. * hw_add_wol_arp - add ARP pattern
  3118. * @hw: The hardware instance.
  3119. * @ip_addr: The IPv4 address assigned to the device.
  3120. *
  3121. * This routine is used to add ARP pattern for waking up the host.
  3122. */
  3123. static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
  3124. {
  3125. static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
  3126. u8 pattern[42] = {
  3127. 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  3128. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  3129. 0x08, 0x06,
  3130. 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
  3131. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  3132. 0x00, 0x00, 0x00, 0x00,
  3133. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  3134. 0x00, 0x00, 0x00, 0x00 };
  3135. memcpy(&pattern[38], ip_addr, 4);
  3136. hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
  3137. }
  3138. /**
  3139. * hw_add_wol_bcast - add broadcast pattern
  3140. * @hw: The hardware instance.
  3141. *
  3142. * This routine is used to add broadcast pattern for waking up the host.
  3143. */
  3144. static void hw_add_wol_bcast(struct ksz_hw *hw)
  3145. {
  3146. static const u8 mask[] = { 0x3F };
  3147. static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
  3148. hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
  3149. }
  3150. /**
  3151. * hw_add_wol_mcast - add multicast pattern
  3152. * @hw: The hardware instance.
  3153. *
  3154. * This routine is used to add multicast pattern for waking up the host.
  3155. *
  3156. * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
  3157. * by the IPv6 ping command. Note that multicast packets are filtered through
  3158. * multicast hash table, so not all multicast packets can wake up the host.
  3159. */
  3160. static void hw_add_wol_mcast(struct ksz_hw *hw)
  3161. {
  3162. static const u8 mask[] = { 0x3F };
  3163. u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
  3164. memcpy(&pattern[3], &hw->override_addr[3], 3);
  3165. hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
  3166. }
  3167. /**
  3168. * hw_add_wol_ucast - add unicast pattern
  3169. * @hw: The hardware instance.
  3170. *
  3171. * This routine is used to add unicast pattern to wake up the host.
  3172. *
  3173. * It is assumed the unicast packet is directed to the device, as the hardware
  3174. * can only receive such packets in the normal case.
  3175. */
  3176. static void hw_add_wol_ucast(struct ksz_hw *hw)
  3177. {
  3178. static const u8 mask[] = { 0x3F };
  3179. hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
  3180. }
  3181. /**
  3182. * hw_enable_wol - enable Wake-on-LAN
  3183. * @hw: The hardware instance.
  3184. * @wol_enable: The Wake-on-LAN settings.
  3185. * @net_addr: The IPv4 address assigned to the device.
  3186. *
  3187. * This routine is used to enable Wake-on-LAN depending on driver settings.
  3188. */
  3189. static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
  3190. {
  3191. hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
  3192. hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
  3193. hw_add_wol_ucast(hw);
  3194. hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
  3195. hw_add_wol_mcast(hw);
  3196. hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
  3197. hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
  3198. hw_add_wol_arp(hw, net_addr);
  3199. }
  3200. /**
  3201. * hw_init - check driver is correct for the hardware
  3202. * @hw: The hardware instance.
  3203. *
  3204. * This function checks that the hardware is correct for this driver and sets
  3205. * the hardware up for proper initialization.
  3206. *
  3207. * Return the number of ports, or 0 if the hardware is not supported.
  3208. */
  3209. static int hw_init(struct ksz_hw *hw)
  3210. {
  3211. int rc = 0;
  3212. u16 data;
  3213. u16 revision;
  3214. /* Set bus speed to 125MHz. */
  3215. writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);
  3216. /* Check KSZ884x chip ID. */
  3217. data = readw(hw->io + KS884X_CHIP_ID_OFFSET);
  3218. revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
  3219. data &= KS884X_CHIP_ID_MASK_41;
  3220. if (REG_CHIP_ID_41 == data)
  3221. rc = 1;
  3222. else if (REG_CHIP_ID_42 == data)
  3223. rc = 2;
  3224. else
  3225. return 0;
  3226. /* Setup hardware features or bug workarounds. */
  3227. if (revision <= 1) {
  3228. hw->features |= SMALL_PACKET_TX_BUG;
  3229. if (1 == rc)
  3230. hw->features |= HALF_DUPLEX_SIGNAL_BUG;
  3231. }
  3232. return rc;
  3233. }
  3234. /**
  3235. * hw_reset - reset the hardware
  3236. * @hw: The hardware instance.
  3237. *
  3238. * This routine resets the hardware.
  3239. */
  3240. static void hw_reset(struct ksz_hw *hw)
  3241. {
  3242. writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
  3243. /* Wait for device to reset. */
  3244. mdelay(10);
  3245. /* Write 0 to clear device reset. */
  3246. writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
  3247. }
  3248. /**
  3249. * hw_setup - setup the hardware
  3250. * @hw: The hardware instance.
  3251. *
  3252. * This routine sets up the hardware for proper operation.
  3253. */
  3254. static void hw_setup(struct ksz_hw *hw)
  3255. {
  3256. #if SET_DEFAULT_LED
  3257. u16 data;
  3258. /* Change default LED mode. */
  3259. data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
  3260. data &= ~LED_MODE;
  3261. data |= SET_DEFAULT_LED;
  3262. writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
  3263. #endif
  3264. /* Setup transmit control. */
  3265. hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
  3266. (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);
  3267. /* Setup receive control. */
  3268. hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
  3269. (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
  3270. hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;
  3271. /* Hardware cannot handle UDP packets in IP fragments. */
  3272. hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
  3273. if (hw->all_multi)
  3274. hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
  3275. if (hw->promiscuous)
  3276. hw->rx_cfg |= DMA_RX_PROMISCUOUS;
  3277. }
  3278. /**
  3279. * hw_setup_intr - setup interrupt mask
  3280. * @hw: The hardware instance.
  3281. *
  3282. * This routine sets up the interrupt mask for proper operation.
  3283. */
  3284. static void hw_setup_intr(struct ksz_hw *hw)
  3285. {
  3286. hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
  3287. }
  3288. static void ksz_check_desc_num(struct ksz_desc_info *info)
  3289. {
  3290. #define MIN_DESC_SHIFT 2
  3291. int alloc = info->alloc;
  3292. int shift;
  3293. shift = 0;
  3294. while (!(alloc & 1)) {
  3295. shift++;
  3296. alloc >>= 1;
  3297. }
  3298. if (alloc != 1 || shift < MIN_DESC_SHIFT) {
  3299. pr_alert("Hardware descriptor numbers not right!\n");
  3300. while (alloc) {
  3301. shift++;
  3302. alloc >>= 1;
  3303. }
  3304. if (shift < MIN_DESC_SHIFT)
  3305. shift = MIN_DESC_SHIFT;
  3306. alloc = 1 << shift;
  3307. info->alloc = alloc;
  3308. }
  3309. info->mask = info->alloc - 1;
  3310. }
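/*
 * Example (illustration only): if info->alloc were 24, the check above fails
 * because 24 is not a power of two; the second loop then rounds it up through
 * shift = 5, so info->alloc becomes 32 and info->mask becomes 31.
 */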
  3311. static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
  3312. {
  3313. int i;
  3314. u32 phys = desc_info->ring_phys;
  3315. struct ksz_hw_desc *desc = desc_info->ring_virt;
  3316. struct ksz_desc *cur = desc_info->ring;
  3317. struct ksz_desc *previous = NULL;
  3318. for (i = 0; i < desc_info->alloc; i++) {
  3319. cur->phw = desc++;
  3320. phys += desc_info->size;
  3321. previous = cur++;
  3322. previous->phw->next = cpu_to_le32(phys);
  3323. }
  3324. previous->phw->next = cpu_to_le32(desc_info->ring_phys);
  3325. previous->sw.buf.rx.end_of_ring = 1;
  3326. previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);
  3327. desc_info->avail = desc_info->alloc;
  3328. desc_info->last = desc_info->next = 0;
  3329. desc_info->cur = desc_info->ring;
  3330. }
  3331. /**
  3332. * hw_set_desc_base - set descriptor base addresses
  3333. * @hw: The hardware instance.
  3334. * @tx_addr: The transmit descriptor base.
  3335. * @rx_addr: The receive descriptor base.
  3336. *
  3337. * This routine programs the descriptor base addresses after reset.
  3338. */
  3339. static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
  3340. {
  3341. /* Set base address of Tx/Rx descriptors. */
  3342. writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
  3343. writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
  3344. }
  3345. static void hw_reset_pkts(struct ksz_desc_info *info)
  3346. {
  3347. info->cur = info->ring;
  3348. info->avail = info->alloc;
  3349. info->last = info->next = 0;
  3350. }
  3351. static inline void hw_resume_rx(struct ksz_hw *hw)
  3352. {
  3353. writel(DMA_START, hw->io + KS_DMA_RX_START);
  3354. }
  3355. /**
  3356. * hw_start_rx - start receiving
  3357. * @hw: The hardware instance.
  3358. *
  3359. * This routine starts the receive function of the hardware.
  3360. */
  3361. static void hw_start_rx(struct ksz_hw *hw)
  3362. {
  3363. writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
  3364. /* Notify when the receive stops. */
  3365. hw->intr_mask |= KS884X_INT_RX_STOPPED;
  3366. writel(DMA_START, hw->io + KS_DMA_RX_START);
  3367. hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
  3368. hw->rx_stop++;
  3369. /* Variable overflows. */
  3370. if (0 == hw->rx_stop)
  3371. hw->rx_stop = 2;
  3372. }
  3373. /**
  3374. * hw_stop_rx - stop receiving
  3375. * @hw: The hardware instance.
  3376. *
  3377. * This routine stops the receive function of the hardware.
  3378. */
  3379. static void hw_stop_rx(struct ksz_hw *hw)
  3380. {
  3381. hw->rx_stop = 0;
  3382. hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
  3383. writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
  3384. }
  3385. /**
  3386. * hw_start_tx - start transmitting
  3387. * @hw: The hardware instance.
  3388. *
  3389. * This routine starts the transmit function of the hardware.
  3390. */
  3391. static void hw_start_tx(struct ksz_hw *hw)
  3392. {
  3393. writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
  3394. }
  3395. /**
  3396. * hw_stop_tx - stop transmitting
  3397. * @hw: The hardware instance.
  3398. *
  3399. * This routine stops the transmit function of the hardware.
  3400. */
  3401. static void hw_stop_tx(struct ksz_hw *hw)
  3402. {
  3403. writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
  3404. }
  3405. /**
  3406. * hw_disable - disable hardware
  3407. * @hw: The hardware instance.
  3408. *
  3409. * This routine disables the hardware.
  3410. */
  3411. static void hw_disable(struct ksz_hw *hw)
  3412. {
  3413. hw_stop_rx(hw);
  3414. hw_stop_tx(hw);
  3415. hw->enabled = 0;
  3416. }
  3417. /**
  3418. * hw_enable - enable hardware
  3419. * @hw: The hardware instance.
  3420. *
  3421. * This routine enables the hardware.
  3422. */
  3423. static void hw_enable(struct ksz_hw *hw)
  3424. {
  3425. hw_start_tx(hw);
  3426. hw_start_rx(hw);
  3427. hw->enabled = 1;
  3428. }
  3429. /**
  3430. * hw_alloc_pkt - allocate enough descriptors for transmission
  3431. * @hw: The hardware instance.
  3432. * @length: The length of the packet.
  3433. * @physical: Number of descriptors required.
  3434. *
  3435. * This function allocates descriptors for transmission.
  3436. *
  3437. * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
  3438. */
  3439. static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
  3440. {
  3441. /* Always leave one descriptor free. */
  3442. if (hw->tx_desc_info.avail <= 1)
  3443. return 0;
  3444. /* Allocate a descriptor for transmission and mark it current. */
  3445. get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
  3446. hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
  3447. /* Keep track of number of transmit descriptors used so far. */
  3448. ++hw->tx_int_cnt;
  3449. hw->tx_size += length;
  3450. /* Do not hold on to too much data before forcing an interrupt. */
  3451. if (hw->tx_size >= MAX_TX_HELD_SIZE)
  3452. hw->tx_int_cnt = hw->tx_int_mask + 1;
  3453. if (physical > hw->tx_desc_info.avail)
  3454. return 1;
  3455. return hw->tx_desc_info.avail;
  3456. }
  3457. /**
  3458. * hw_send_pkt - mark packet for transmission
  3459. * @hw: The hardware instance.
  3460. *
  3461. * This routine marks the packet for transmission in PCI version.
  3462. */
  3463. static void hw_send_pkt(struct ksz_hw *hw)
  3464. {
  3465. struct ksz_desc *cur = hw->tx_desc_info.cur;
  3466. cur->sw.buf.tx.last_seg = 1;
  3467. /* Interrupt only after specified number of descriptors used. */
  3468. if (hw->tx_int_cnt > hw->tx_int_mask) {
  3469. cur->sw.buf.tx.intr = 1;
  3470. hw->tx_int_cnt = 0;
  3471. hw->tx_size = 0;
  3472. }
  3473. /* KSZ8842 supports port directed transmission. */
  3474. cur->sw.buf.tx.dest_port = hw->dst_ports;
  3475. release_desc(cur);
  3476. writel(0, hw->io + KS_DMA_TX_START);
  3477. }
  3478. static int empty_addr(u8 *addr)
  3479. {
  3480. u32 *addr1 = (u32 *) addr;
  3481. u16 *addr2 = (u16 *) &addr[4];
  3482. return 0 == *addr1 && 0 == *addr2;
  3483. }
  3484. /**
  3485. * hw_set_addr - set MAC address
  3486. * @hw: The hardware instance.
  3487. *
  3488. * This routine programs the MAC address of the hardware when the address is
  3489. * overridden.
  3490. */
  3491. static void hw_set_addr(struct ksz_hw *hw)
  3492. {
  3493. int i;
  3494. for (i = 0; i < ETH_ALEN; i++)
  3495. writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
  3496. hw->io + KS884X_ADDR_0_OFFSET + i);
  3497. sw_set_addr(hw, hw->override_addr);
  3498. }
  3499. /**
  3500. * hw_read_addr - read MAC address
  3501. * @hw: The hardware instance.
  3502. *
  3503. * This routine retrieves the MAC address of the hardware.
  3504. */
  3505. static void hw_read_addr(struct ksz_hw *hw)
  3506. {
  3507. int i;
  3508. for (i = 0; i < ETH_ALEN; i++)
  3509. hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
  3510. KS884X_ADDR_0_OFFSET + i);
  3511. if (!hw->mac_override) {
  3512. memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
  3513. if (empty_addr(hw->override_addr)) {
  3514. memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
  3515. memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
  3516. ETH_ALEN);
  3517. hw->override_addr[5] += hw->id;
  3518. hw_set_addr(hw);
  3519. }
  3520. }
  3521. }
  3522. static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
  3523. {
  3524. int i;
  3525. u32 mac_addr_lo;
  3526. u32 mac_addr_hi;
  3527. mac_addr_hi = 0;
  3528. for (i = 0; i < 2; i++) {
  3529. mac_addr_hi <<= 8;
  3530. mac_addr_hi |= mac_addr[i];
  3531. }
  3532. mac_addr_hi |= ADD_ADDR_ENABLE;
  3533. mac_addr_lo = 0;
  3534. for (i = 2; i < 6; i++) {
  3535. mac_addr_lo <<= 8;
  3536. mac_addr_lo |= mac_addr[i];
  3537. }
  3538. index *= ADD_ADDR_INCR;
  3539. writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
  3540. writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
  3541. }
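/*
 * Example (illustration only): for the address 00:10:a1:22:33:44 the loops
 * above build mac_addr_hi = 0x0010 | ADD_ADDR_ENABLE and
 * mac_addr_lo = 0xa1223344, which are then written to the additional MAC
 * address register pair selected by the index.
 */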
  3542. static void hw_set_add_addr(struct ksz_hw *hw)
  3543. {
  3544. int i;
  3545. for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
  3546. if (empty_addr(hw->address[i]))
  3547. writel(0, hw->io + ADD_ADDR_INCR * i +
  3548. KS_ADD_ADDR_0_HI);
  3549. else
  3550. hw_ena_add_addr(hw, i, hw->address[i]);
  3551. }
  3552. }
  3553. static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
  3554. {
  3555. int i;
  3556. int j = ADDITIONAL_ENTRIES;
  3557. if (ether_addr_equal(hw->override_addr, mac_addr))
  3558. return 0;
  3559. for (i = 0; i < hw->addr_list_size; i++) {
  3560. if (ether_addr_equal(hw->address[i], mac_addr))
  3561. return 0;
  3562. if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
  3563. j = i;
  3564. }
  3565. if (j < ADDITIONAL_ENTRIES) {
  3566. memcpy(hw->address[j], mac_addr, ETH_ALEN);
  3567. hw_ena_add_addr(hw, j, hw->address[j]);
  3568. return 0;
  3569. }
  3570. return -1;
  3571. }
  3572. static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
  3573. {
  3574. int i;
  3575. for (i = 0; i < hw->addr_list_size; i++) {
  3576. if (ether_addr_equal(hw->address[i], mac_addr)) {
  3577. eth_zero_addr(hw->address[i]);
  3578. writel(0, hw->io + ADD_ADDR_INCR * i +
  3579. KS_ADD_ADDR_0_HI);
  3580. return 0;
  3581. }
  3582. }
  3583. return -1;
  3584. }
  3585. /**
  3586. * hw_clr_multicast - clear multicast addresses
  3587. * @hw: The hardware instance.
  3588. *
  3589. * This routine removes all multicast addresses set in the hardware.
  3590. */
  3591. static void hw_clr_multicast(struct ksz_hw *hw)
  3592. {
  3593. int i;
  3594. for (i = 0; i < HW_MULTICAST_SIZE; i++) {
  3595. hw->multi_bits[i] = 0;
  3596. writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
  3597. }
  3598. }
  3599. /**
  3600. * hw_set_grp_addr - set multicast addresses
  3601. * @hw: The hardware instance.
  3602. *
  3603. * This routine programs the multicast addresses so that the hardware accepts
  3604. * packets sent to those addresses.
  3605. */
  3606. static void hw_set_grp_addr(struct ksz_hw *hw)
  3607. {
  3608. int i;
  3609. int index;
  3610. int position;
  3611. int value;
  3612. memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
  3613. for (i = 0; i < hw->multi_list_size; i++) {
  3614. position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
  3615. index = position >> 3;
  3616. value = 1 << (position & 7);
  3617. hw->multi_bits[index] |= (u8) value;
  3618. }
  3619. for (i = 0; i < HW_MULTICAST_SIZE; i++)
  3620. writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
  3621. i);
  3622. }
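/*
 * Illustrative sketch only -- not part of the original driver.  It extracts
 * the hash-bucket computation used in hw_set_grp_addr() above: bits 31..26 of
 * the Ethernet CRC of the address select one of the 64 multicast filter bits.
 * The helper name is made up for the example.
 */
static inline int __maybe_unused ksz_example_mc_hash_bit(const u8 *mac_addr)
{
	return (ether_crc(ETH_ALEN, mac_addr) >> 26) & 0x3f;
}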
  3623. /**
  3624. * hw_set_multicast - enable or disable all multicast receiving
  3625. * @hw: The hardware instance.
  3626. * @multicast: To turn on or off the all multicast feature.
  3627. *
  3628. * This routine enables/disables the hardware to accept all multicast packets.
  3629. */
  3630. static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
  3631. {
  3632. /* Stop receiving for reconfiguration. */
  3633. hw_stop_rx(hw);
  3634. if (multicast)
  3635. hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
  3636. else
  3637. hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
  3638. if (hw->enabled)
  3639. hw_start_rx(hw);
  3640. }
  3641. /**
  3642. * hw_set_promiscuous - enable or disable promiscuous receiving
  3643. * @hw: The hardware instance.
  3644. * @prom: To turn on or off the promiscuous feature.
  3645. *
  3646. * This routine enables/disables the hardware to accept all packets.
  3647. */
  3648. static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
  3649. {
  3650. /* Stop receiving for reconfiguration. */
  3651. hw_stop_rx(hw);
  3652. if (prom)
  3653. hw->rx_cfg |= DMA_RX_PROMISCUOUS;
  3654. else
  3655. hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
  3656. if (hw->enabled)
  3657. hw_start_rx(hw);
  3658. }
  3659. /**
  3660. * sw_enable - enable the switch
  3661. * @hw: The hardware instance.
  3662. * @enable: The flag to enable or disable the switch
  3663. *
  3664. * This routine is used to enable/disable the switch in KSZ8842.
  3665. */
  3666. static void sw_enable(struct ksz_hw *hw, int enable)
  3667. {
  3668. int port;
  3669. for (port = 0; port < SWITCH_PORT_NUM; port++) {
  3670. if (hw->dev_count > 1) {
  3671. /* Set port-based VLAN membership with the host port. */
  3672. sw_cfg_port_base_vlan(hw, port,
  3673. HOST_MASK | (1 << port));
  3674. port_set_stp_state(hw, port, STP_STATE_DISABLED);
  3675. } else {
  3676. sw_cfg_port_base_vlan(hw, port, PORT_MASK);
  3677. port_set_stp_state(hw, port, STP_STATE_FORWARDING);
  3678. }
  3679. }
  3680. if (hw->dev_count > 1)
  3681. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
  3682. else
  3683. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
  3684. if (enable)
  3685. enable = KS8842_START;
  3686. writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
  3687. }
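/*
 * Example (illustration only): with multiple network interfaces
 * (dev_count > 1) each port's membership above becomes HOST_MASK | (1 << port),
 * so traffic is only switched between a port and the host and never directly
 * between the external ports; with a single interface every port is given
 * PORT_MASK and forwards freely.
 */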
  3688. /**
  3689. * sw_setup - setup the switch
  3690. * @hw: The hardware instance.
  3691. *
  3692. * This routine sets up the hardware switch engine for default operation.
  3693. */
  3694. static void sw_setup(struct ksz_hw *hw)
  3695. {
  3696. int port;
  3697. sw_set_global_ctrl(hw);
  3698. /* Enable switch broadcast storm protection at a 10% rate. */
  3699. sw_init_broad_storm(hw);
  3700. hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
  3701. for (port = 0; port < SWITCH_PORT_NUM; port++)
  3702. sw_ena_broad_storm(hw, port);
  3703. sw_init_prio(hw);
  3704. sw_init_mirror(hw);
  3705. sw_init_prio_rate(hw);
  3706. sw_init_vlan(hw);
  3707. if (hw->features & STP_SUPPORT)
  3708. sw_init_stp(hw);
  3709. if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  3710. SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
  3711. hw->overrides |= PAUSE_FLOW_CTRL;
  3712. sw_enable(hw, 1);
  3713. }
  3714. /**
  3715. * ksz_start_timer - start kernel timer
  3716. * @info: Kernel timer information.
  3717. * @time: The time tick.
  3718. *
  3719. * This routine starts the kernel timer after the specified time tick.
  3720. */
  3721. static void ksz_start_timer(struct ksz_timer_info *info, int time)
  3722. {
  3723. info->cnt = 0;
  3724. info->timer.expires = jiffies + time;
  3725. add_timer(&info->timer);
  3726. /* infinity */
  3727. info->max = -1;
  3728. }
  3729. /**
  3730. * ksz_stop_timer - stop kernel timer
  3731. * @info: Kernel timer information.
  3732. *
  3733. * This routine stops the kernel timer.
  3734. */
  3735. static void ksz_stop_timer(struct ksz_timer_info *info)
  3736. {
  3737. if (info->max) {
  3738. info->max = 0;
  3739. del_timer_sync(&info->timer);
  3740. }
  3741. }
  3742. static void ksz_init_timer(struct ksz_timer_info *info, int period,
  3743. void (*function)(struct timer_list *))
  3744. {
  3745. info->max = 0;
  3746. info->period = period;
  3747. timer_setup(&info->timer, function, 0);
  3748. }
  3749. static void ksz_update_timer(struct ksz_timer_info *info)
  3750. {
  3751. ++info->cnt;
  3752. if (info->max > 0) {
  3753. if (info->cnt < info->max) {
  3754. info->timer.expires = jiffies + info->period;
  3755. add_timer(&info->timer);
  3756. } else
  3757. info->max = 0;
  3758. } else if (info->max < 0) {
  3759. info->timer.expires = jiffies + info->period;
  3760. add_timer(&info->timer);
  3761. }
  3762. }
  3763. /**
  3764. * ksz_alloc_soft_desc - allocate software descriptors
  3765. * @desc_info: Descriptor information structure.
  3766. * @transmit: Indication that descriptors are for transmit.
  3767. *
  3768. * This local function allocates software descriptors for manipulation in
  3769. * memory.
  3770. *
  3771. * Return 0 if successful.
  3772. */
  3773. static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
  3774. {
  3775. desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc),
  3776. GFP_KERNEL);
  3777. if (!desc_info->ring)
  3778. return 1;
  3779. hw_init_desc(desc_info, transmit);
  3780. return 0;
  3781. }
  3782. /**
  3783. * ksz_alloc_desc - allocate hardware descriptors
  3784. * @adapter: Adapter information structure.
  3785. *
  3786. * This local function allocates hardware descriptors for receiving and
  3787. * transmitting.
  3788. *
  3789. * Return 0 if successful.
  3790. */
  3791. static int ksz_alloc_desc(struct dev_info *adapter)
  3792. {
  3793. struct ksz_hw *hw = &adapter->hw;
  3794. int offset;
  3795. /* Allocate memory for RX & TX descriptors. */
  3796. adapter->desc_pool.alloc_size =
  3797. hw->rx_desc_info.size * hw->rx_desc_info.alloc +
  3798. hw->tx_desc_info.size * hw->tx_desc_info.alloc +
  3799. DESC_ALIGNMENT;
  3800. adapter->desc_pool.alloc_virt =
  3801. pci_zalloc_consistent(adapter->pdev,
  3802. adapter->desc_pool.alloc_size,
  3803. &adapter->desc_pool.dma_addr);
  3804. if (adapter->desc_pool.alloc_virt == NULL) {
  3805. adapter->desc_pool.alloc_size = 0;
  3806. return 1;
  3807. }
  3808. /* Align to the next cache line boundary. */
  3809. offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
  3810. (DESC_ALIGNMENT -
  3811. ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
  3812. adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
  3813. adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;
  3814. /* Allocate receive/transmit descriptors. */
  3815. hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
  3816. adapter->desc_pool.virt;
  3817. hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
  3818. offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
  3819. hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
  3820. (adapter->desc_pool.virt + offset);
  3821. hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;
  3822. if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
  3823. return 1;
  3824. if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
  3825. return 1;
  3826. return 0;
  3827. }
  3828. /**
  3829. * free_dma_buf - release DMA buffer resources
  3830. * @adapter: Adapter information structure.
  3831. *
  3832. * This routine is just a helper function to release the DMA buffer resources.
  3833. */
  3834. static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
  3835. int direction)
  3836. {
  3837. pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
  3838. dev_kfree_skb(dma_buf->skb);
  3839. dma_buf->skb = NULL;
  3840. dma_buf->dma = 0;
  3841. }
  3842. /**
  3843. * ksz_init_rx_buffers - initialize receive descriptors
  3844. * @adapter: Adapter information structure.
  3845. *
  3846. * This routine initializes DMA buffers for receiving.
  3847. */
  3848. static void ksz_init_rx_buffers(struct dev_info *adapter)
  3849. {
  3850. int i;
  3851. struct ksz_desc *desc;
  3852. struct ksz_dma_buf *dma_buf;
  3853. struct ksz_hw *hw = &adapter->hw;
  3854. struct ksz_desc_info *info = &hw->rx_desc_info;
  3855. for (i = 0; i < hw->rx_desc_info.alloc; i++) {
  3856. get_rx_pkt(info, &desc);
  3857. dma_buf = DMA_BUFFER(desc);
  3858. if (dma_buf->skb && dma_buf->len != adapter->mtu)
  3859. free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
  3860. dma_buf->len = adapter->mtu;
  3861. if (!dma_buf->skb)
  3862. dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
  3863. if (dma_buf->skb && !dma_buf->dma)
  3864. dma_buf->dma = pci_map_single(
  3865. adapter->pdev,
  3866. skb_tail_pointer(dma_buf->skb),
  3867. dma_buf->len,
  3868. PCI_DMA_FROMDEVICE);
  3869. /* Set descriptor. */
  3870. set_rx_buf(desc, dma_buf->dma);
  3871. set_rx_len(desc, dma_buf->len);
  3872. release_desc(desc);
  3873. }
  3874. }
  3875. /**
  3876. * ksz_alloc_mem - allocate memory for hardware descriptors
  3877. * @adapter: Adapter information structure.
  3878. *
  3879. * This function allocates memory for use by hardware descriptors for receiving
  3880. * and transmitting.
  3881. *
  3882. * Return 0 if successful.
  3883. */
  3884. static int ksz_alloc_mem(struct dev_info *adapter)
  3885. {
  3886. struct ksz_hw *hw = &adapter->hw;
  3887. /* Determine the number of receive and transmit descriptors. */
  3888. hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
  3889. hw->tx_desc_info.alloc = NUM_OF_TX_DESC;
  3890. /* Determine how many descriptors to use before requesting a transmit interrupt. */
  3891. hw->tx_int_cnt = 0;
  3892. hw->tx_int_mask = NUM_OF_TX_DESC / 4;
  3893. if (hw->tx_int_mask > 8)
  3894. hw->tx_int_mask = 8;
  3895. while (hw->tx_int_mask) {
  3896. hw->tx_int_cnt++;
  3897. hw->tx_int_mask >>= 1;
  3898. }
  3899. if (hw->tx_int_cnt) {
  3900. hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
  3901. hw->tx_int_cnt = 0;
  3902. }
  3903. /* Determine the descriptor size. */
  3904. hw->rx_desc_info.size =
  3905. (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
  3906. DESC_ALIGNMENT) * DESC_ALIGNMENT);
  3907. hw->tx_desc_info.size =
  3908. (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
  3909. DESC_ALIGNMENT) * DESC_ALIGNMENT);
  3910. if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
  3911. pr_alert("Hardware descriptor size not right!\n");
  3912. ksz_check_desc_num(&hw->rx_desc_info);
  3913. ksz_check_desc_num(&hw->tx_desc_info);
  3914. /* Allocate descriptors. */
  3915. if (ksz_alloc_desc(adapter))
  3916. return 1;
  3917. return 0;
  3918. }
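/*
 * Worked example (illustration only): if NUM_OF_TX_DESC were 64, the code
 * above starts from 64 / 4 = 16, caps it at 8, and the two loops turn that
 * into tx_int_mask = (1 << 3) - 1 = 7.  hw_send_pkt() then requests a
 * transmit-complete interrupt only after more than 7 descriptors have been
 * used since the last one.
 */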
  3919. /**
  3920. * ksz_free_desc - free software and hardware descriptors
  3921. * @adapter: Adapter information structure.
  3922. *
  3923. * This local routine frees the software and hardware descriptors allocated by
  3924. * ksz_alloc_desc().
  3925. */
  3926. static void ksz_free_desc(struct dev_info *adapter)
  3927. {
  3928. struct ksz_hw *hw = &adapter->hw;
  3929. /* Reset descriptor. */
  3930. hw->rx_desc_info.ring_virt = NULL;
  3931. hw->tx_desc_info.ring_virt = NULL;
  3932. hw->rx_desc_info.ring_phys = 0;
  3933. hw->tx_desc_info.ring_phys = 0;
  3934. /* Free memory. */
  3935. if (adapter->desc_pool.alloc_virt)
  3936. pci_free_consistent(
  3937. adapter->pdev,
  3938. adapter->desc_pool.alloc_size,
  3939. adapter->desc_pool.alloc_virt,
  3940. adapter->desc_pool.dma_addr);
  3941. /* Reset resource pool. */
  3942. adapter->desc_pool.alloc_size = 0;
  3943. adapter->desc_pool.alloc_virt = NULL;
  3944. kfree(hw->rx_desc_info.ring);
  3945. hw->rx_desc_info.ring = NULL;
  3946. kfree(hw->tx_desc_info.ring);
  3947. hw->tx_desc_info.ring = NULL;
  3948. }
  3949. /**
  3950. * ksz_free_buffers - free buffers used in the descriptors
  3951. * @adapter: Adapter information structure.
  3952. * @desc_info: Descriptor information structure.
* @direction: DMA direction of the buffers (to or from the device).
3953. *
3954. * This local routine frees the socket buffers attached to the DMA buffers.
  3955. */
  3956. static void ksz_free_buffers(struct dev_info *adapter,
  3957. struct ksz_desc_info *desc_info, int direction)
  3958. {
  3959. int i;
  3960. struct ksz_dma_buf *dma_buf;
  3961. struct ksz_desc *desc = desc_info->ring;
  3962. for (i = 0; i < desc_info->alloc; i++) {
  3963. dma_buf = DMA_BUFFER(desc);
  3964. if (dma_buf->skb)
  3965. free_dma_buf(adapter, dma_buf, direction);
  3966. desc++;
  3967. }
  3968. }
  3969. /**
  3970. * ksz_free_mem - free all resources used by descriptors
  3971. * @adapter: Adapter information structure.
  3972. *
  3973. * This local routine frees all the resources allocated by ksz_alloc_mem().
  3974. */
  3975. static void ksz_free_mem(struct dev_info *adapter)
  3976. {
  3977. /* Free transmit buffers. */
  3978. ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
  3979. PCI_DMA_TODEVICE);
  3980. /* Free receive buffers. */
  3981. ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
  3982. PCI_DMA_FROMDEVICE);
  3983. /* Free descriptors. */
  3984. ksz_free_desc(adapter);
  3985. }
  3986. static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
  3987. u64 *counter)
  3988. {
  3989. int i;
  3990. int mib;
  3991. int port;
  3992. struct ksz_port_mib *port_mib;
  3993. memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
  3994. for (i = 0, port = first; i < cnt; i++, port++) {
  3995. port_mib = &hw->port_mib[port];
  3996. for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
  3997. counter[mib] += port_mib->counter[mib];
  3998. }
  3999. }
  4000. /**
  4001. * send_packet - send packet
  4002. * @skb: Socket buffer.
  4003. * @dev: Network device.
  4004. *
  4005. * This routine is used to send a packet out to the network.
  4006. */
  4007. static void send_packet(struct sk_buff *skb, struct net_device *dev)
  4008. {
  4009. struct ksz_desc *desc;
  4010. struct ksz_desc *first;
  4011. struct dev_priv *priv = netdev_priv(dev);
  4012. struct dev_info *hw_priv = priv->adapter;
  4013. struct ksz_hw *hw = &hw_priv->hw;
  4014. struct ksz_desc_info *info = &hw->tx_desc_info;
  4015. struct ksz_dma_buf *dma_buf;
  4016. int len;
  4017. int last_frag = skb_shinfo(skb)->nr_frags;
  4018. /*
  4019. * KSZ8842 with multiple device interfaces needs to be told which port
4020. * to send the packet to.
  4021. */
  4022. if (hw->dev_count > 1)
  4023. hw->dst_ports = 1 << priv->port.first_port;
  4024. /* Hardware will pad the length to 60. */
  4025. len = skb->len;
  4026. /* Remember the very first descriptor. */
  4027. first = info->cur;
  4028. desc = first;
  4029. dma_buf = DMA_BUFFER(desc);
  4030. if (last_frag) {
  4031. int frag;
  4032. skb_frag_t *this_frag;
  4033. dma_buf->len = skb_headlen(skb);
  4034. dma_buf->dma = pci_map_single(
  4035. hw_priv->pdev, skb->data, dma_buf->len,
  4036. PCI_DMA_TODEVICE);
  4037. set_tx_buf(desc, dma_buf->dma);
  4038. set_tx_len(desc, dma_buf->len);
  4039. frag = 0;
  4040. do {
  4041. this_frag = &skb_shinfo(skb)->frags[frag];
  4042. /* Get a new descriptor. */
  4043. get_tx_pkt(info, &desc);
  4044. /* Keep track of descriptors used so far. */
  4045. ++hw->tx_int_cnt;
  4046. dma_buf = DMA_BUFFER(desc);
  4047. dma_buf->len = skb_frag_size(this_frag);
  4048. dma_buf->dma = pci_map_single(
  4049. hw_priv->pdev,
  4050. skb_frag_address(this_frag),
  4051. dma_buf->len,
  4052. PCI_DMA_TODEVICE);
  4053. set_tx_buf(desc, dma_buf->dma);
  4054. set_tx_len(desc, dma_buf->len);
  4055. frag++;
  4056. if (frag == last_frag)
  4057. break;
  4058. /* Do not release the last descriptor here. */
  4059. release_desc(desc);
  4060. } while (1);
  4061. /* current points to the last descriptor. */
  4062. info->cur = desc;
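/*
 * Releasing the first descriptor only after every fragment has been
 * filled in keeps the whole chain under software ownership until it is
 * complete, so the DMA engine never starts on a half-built packet.
 */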
  4063. /* Release the first descriptor. */
  4064. release_desc(first);
  4065. } else {
  4066. dma_buf->len = len;
  4067. dma_buf->dma = pci_map_single(
  4068. hw_priv->pdev, skb->data, dma_buf->len,
  4069. PCI_DMA_TODEVICE);
  4070. set_tx_buf(desc, dma_buf->dma);
  4071. set_tx_len(desc, dma_buf->len);
  4072. }
  4073. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  4074. (desc)->sw.buf.tx.csum_gen_tcp = 1;
  4075. (desc)->sw.buf.tx.csum_gen_udp = 1;
  4076. }
  4077. /*
  4078. * The last descriptor holds the packet so that it can be returned to
  4079. * network subsystem after all descriptors are transmitted.
  4080. */
  4081. dma_buf->skb = skb;
  4082. hw_send_pkt(hw);
  4083. /* Update transmit statistics. */
  4084. dev->stats.tx_packets++;
  4085. dev->stats.tx_bytes += len;
  4086. }
  4087. /**
  4088. * transmit_cleanup - clean up transmit descriptors
4089. * @hw_priv: Network device information.
* @normal: Non-zero to stop at the first descriptor still owned by the
* hardware; zero to reclaim every descriptor (reset path).
4090. *
4091. * This routine is called to clean up the transmitted buffers.
  4092. */
  4093. static void transmit_cleanup(struct dev_info *hw_priv, int normal)
  4094. {
  4095. int last;
  4096. union desc_stat status;
  4097. struct ksz_hw *hw = &hw_priv->hw;
  4098. struct ksz_desc_info *info = &hw->tx_desc_info;
  4099. struct ksz_desc *desc;
  4100. struct ksz_dma_buf *dma_buf;
  4101. struct net_device *dev = NULL;
  4102. spin_lock_irq(&hw_priv->hwlock);
  4103. last = info->last;
  4104. while (info->avail < info->alloc) {
  4105. /* Get next descriptor which is not hardware owned. */
  4106. desc = &info->ring[last];
  4107. status.data = le32_to_cpu(desc->phw->ctrl.data);
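/*
 * In the normal interrupt path stop at the first descriptor that is
 * still owned by the hardware; in the reset path (!normal) force the
 * descriptor back to software ownership and keep reclaiming.
 */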
  4108. if (status.tx.hw_owned) {
  4109. if (normal)
  4110. break;
  4111. else
  4112. reset_desc(desc, status);
  4113. }
  4114. dma_buf = DMA_BUFFER(desc);
  4115. pci_unmap_single(
  4116. hw_priv->pdev, dma_buf->dma, dma_buf->len,
  4117. PCI_DMA_TODEVICE);
  4118. /* This descriptor contains the last buffer in the packet. */
  4119. if (dma_buf->skb) {
  4120. dev = dma_buf->skb->dev;
  4121. /* Release the packet back to network subsystem. */
  4122. dev_kfree_skb_irq(dma_buf->skb);
  4123. dma_buf->skb = NULL;
  4124. }
  4125. /* Free the transmitted descriptor. */
  4126. last++;
  4127. last &= info->mask;
  4128. info->avail++;
  4129. }
  4130. info->last = last;
  4131. spin_unlock_irq(&hw_priv->hwlock);
4132. /* A packet was reclaimed; refresh the transmit timestamp so the watchdog does not fire. */
  4133. if (dev)
  4134. netif_trans_update(dev);
  4135. }
  4136. /**
4137. * tx_done - transmit done processing
4138. * @hw_priv: Network device information.
  4139. *
  4140. * This routine is called when the transmit interrupt is triggered, indicating
  4141. * either a packet is sent successfully or there are transmit errors.
  4142. */
  4143. static void tx_done(struct dev_info *hw_priv)
  4144. {
  4145. struct ksz_hw *hw = &hw_priv->hw;
  4146. int port;
  4147. transmit_cleanup(hw_priv, 1);
  4148. for (port = 0; port < hw->dev_count; port++) {
  4149. struct net_device *dev = hw->port_info[port].pdev;
  4150. if (netif_running(dev) && netif_queue_stopped(dev))
  4151. netif_wake_queue(dev);
  4152. }
  4153. }
  4154. static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
  4155. {
  4156. skb->dev = old->dev;
  4157. skb->protocol = old->protocol;
  4158. skb->ip_summed = old->ip_summed;
  4159. skb->csum = old->csum;
  4160. skb_set_network_header(skb, ETH_HLEN);
  4161. dev_consume_skb_any(old);
  4162. }
  4163. /**
  4164. * netdev_tx - send out packet
  4165. * @skb: Socket buffer.
  4166. * @dev: Network device.
  4167. *
  4168. * This function is used by the upper network layer to send out a packet.
  4169. *
4170. * Return NETDEV_TX_OK if successful; NETDEV_TX_BUSY otherwise.
  4171. */
  4172. static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
  4173. {
  4174. struct dev_priv *priv = netdev_priv(dev);
  4175. struct dev_info *hw_priv = priv->adapter;
  4176. struct ksz_hw *hw = &hw_priv->hw;
  4177. int left;
  4178. int num = 1;
  4179. int rc = 0;
  4180. if (hw->features & SMALL_PACKET_TX_BUG) {
  4181. struct sk_buff *org_skb = skb;
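/*
 * SMALL_PACKET_TX_BUG workaround: frames of 48 bytes or less are padded
 * to 50 bytes in software (reusing the skb when its data area is large
 * enough, otherwise copying into a fresh one) before they are handed to
 * the hardware, which pads the rest up to the 60-byte minimum.
 */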
  4182. if (skb->len <= 48) {
  4183. if (skb_end_pointer(skb) - skb->data >= 50) {
  4184. memset(&skb->data[skb->len], 0, 50 - skb->len);
  4185. skb->len = 50;
  4186. } else {
  4187. skb = netdev_alloc_skb(dev, 50);
  4188. if (!skb)
  4189. return NETDEV_TX_BUSY;
  4190. memcpy(skb->data, org_skb->data, org_skb->len);
  4191. memset(&skb->data[org_skb->len], 0,
  4192. 50 - org_skb->len);
  4193. skb->len = 50;
  4194. copy_old_skb(org_skb, skb);
  4195. }
  4196. }
  4197. }
  4198. spin_lock_irq(&hw_priv->hwlock);
  4199. num = skb_shinfo(skb)->nr_frags + 1;
  4200. left = hw_alloc_pkt(hw, skb->len, num);
  4201. if (left) {
  4202. if (left < num ||
  4203. (CHECKSUM_PARTIAL == skb->ip_summed &&
  4204. skb->protocol == htons(ETH_P_IPV6))) {
  4205. struct sk_buff *org_skb = skb;
  4206. skb = netdev_alloc_skb(dev, org_skb->len);
  4207. if (!skb) {
  4208. rc = NETDEV_TX_BUSY;
  4209. goto unlock;
  4210. }
  4211. skb_copy_and_csum_dev(org_skb, skb->data);
  4212. org_skb->ip_summed = CHECKSUM_NONE;
  4213. skb->len = org_skb->len;
  4214. copy_old_skb(org_skb, skb);
  4215. }
  4216. send_packet(skb, dev);
  4217. if (left <= num)
  4218. netif_stop_queue(dev);
  4219. } else {
4220. /* Stop the transmit queue until more descriptors are available. */
  4221. netif_stop_queue(dev);
  4222. rc = NETDEV_TX_BUSY;
  4223. }
  4224. unlock:
  4225. spin_unlock_irq(&hw_priv->hwlock);
  4226. return rc;
  4227. }
  4228. /**
  4229. * netdev_tx_timeout - transmit timeout processing
  4230. * @dev: Network device.
  4231. *
  4232. * This routine is called when the transmit timer expires. That indicates the
  4233. * hardware is not running correctly because transmit interrupts are not
  4234. * triggered to free up resources so that the transmit routine can continue
  4235. * sending out packets. The hardware is reset to correct the problem.
  4236. */
  4237. static void netdev_tx_timeout(struct net_device *dev)
  4238. {
  4239. static unsigned long last_reset;
  4240. struct dev_priv *priv = netdev_priv(dev);
  4241. struct dev_info *hw_priv = priv->adapter;
  4242. struct ksz_hw *hw = &hw_priv->hw;
  4243. int port;
  4244. if (hw->dev_count > 1) {
  4245. /*
  4246. * Only reset the hardware if time between calls is long
  4247. * enough.
  4248. */
  4249. if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
  4250. hw_priv = NULL;
  4251. }
  4252. last_reset = jiffies;
  4253. if (hw_priv) {
  4254. hw_dis_intr(hw);
  4255. hw_disable(hw);
  4256. transmit_cleanup(hw_priv, 0);
  4257. hw_reset_pkts(&hw->rx_desc_info);
  4258. hw_reset_pkts(&hw->tx_desc_info);
  4259. ksz_init_rx_buffers(hw_priv);
  4260. hw_reset(hw);
  4261. hw_set_desc_base(hw,
  4262. hw->tx_desc_info.ring_phys,
  4263. hw->rx_desc_info.ring_phys);
  4264. hw_set_addr(hw);
  4265. if (hw->all_multi)
  4266. hw_set_multicast(hw, hw->all_multi);
  4267. else if (hw->multi_list_size)
  4268. hw_set_grp_addr(hw);
  4269. if (hw->dev_count > 1) {
  4270. hw_set_add_addr(hw);
  4271. for (port = 0; port < SWITCH_PORT_NUM; port++) {
  4272. struct net_device *port_dev;
  4273. port_set_stp_state(hw, port,
  4274. STP_STATE_DISABLED);
  4275. port_dev = hw->port_info[port].pdev;
  4276. if (netif_running(port_dev))
  4277. port_set_stp_state(hw, port,
  4278. STP_STATE_SIMPLE);
  4279. }
  4280. }
  4281. hw_enable(hw);
  4282. hw_ena_intr(hw);
  4283. }
  4284. netif_trans_update(dev);
  4285. netif_wake_queue(dev);
  4286. }
  4287. static inline void csum_verified(struct sk_buff *skb)
  4288. {
  4289. unsigned short protocol;
  4290. struct iphdr *iph;
  4291. protocol = skb->protocol;
  4292. skb_reset_network_header(skb);
  4293. iph = (struct iphdr *) skb_network_header(skb);
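/*
 * For a VLAN-tagged frame skb->data starts at the VLAN TCI, so the
 * encapsulated EtherType happens to sit at the tot_len offset of this
 * provisional IP header view; read it from there, then skip the 4-byte
 * tag and re-read the real IP header.
 */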
  4294. if (protocol == htons(ETH_P_8021Q)) {
  4295. protocol = iph->tot_len;
  4296. skb_set_network_header(skb, VLAN_HLEN);
  4297. iph = (struct iphdr *) skb_network_header(skb);
  4298. }
  4299. if (protocol == htons(ETH_P_IP)) {
  4300. if (iph->protocol == IPPROTO_TCP)
  4301. skb->ip_summed = CHECKSUM_UNNECESSARY;
  4302. }
  4303. }
  4304. static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
  4305. struct ksz_desc *desc, union desc_stat status)
  4306. {
  4307. int packet_len;
  4308. struct dev_priv *priv = netdev_priv(dev);
  4309. struct dev_info *hw_priv = priv->adapter;
  4310. struct ksz_dma_buf *dma_buf;
  4311. struct sk_buff *skb;
  4312. int rx_status;
  4313. /* Received length includes 4-byte CRC. */
  4314. packet_len = status.rx.frame_len - 4;
  4315. dma_buf = DMA_BUFFER(desc);
  4316. pci_dma_sync_single_for_cpu(
  4317. hw_priv->pdev, dma_buf->dma, packet_len + 4,
  4318. PCI_DMA_FROMDEVICE);
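/*
 * The frame is always copied into a freshly allocated skb, so the DMA
 * buffer itself stays mapped and can be handed straight back to the
 * hardware once the descriptor is released.
 */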
  4319. do {
  4320. /* skb->data != skb->head */
  4321. skb = netdev_alloc_skb(dev, packet_len + 2);
  4322. if (!skb) {
  4323. dev->stats.rx_dropped++;
  4324. return -ENOMEM;
  4325. }
  4326. /*
4327. * Align the socket buffer on a 4-byte boundary for better
  4328. * performance.
  4329. */
  4330. skb_reserve(skb, 2);
  4331. skb_put_data(skb, dma_buf->skb->data, packet_len);
  4332. } while (0);
  4333. skb->protocol = eth_type_trans(skb, dev);
  4334. if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
  4335. csum_verified(skb);
  4336. /* Update receive statistics. */
  4337. dev->stats.rx_packets++;
  4338. dev->stats.rx_bytes += packet_len;
4339. /* Notify the upper layer of the received packet. */
  4340. rx_status = netif_rx(skb);
  4341. return 0;
  4342. }
  4343. static int dev_rcv_packets(struct dev_info *hw_priv)
  4344. {
  4345. int next;
  4346. union desc_stat status;
  4347. struct ksz_hw *hw = &hw_priv->hw;
  4348. struct net_device *dev = hw->port_info[0].pdev;
  4349. struct ksz_desc_info *info = &hw->rx_desc_info;
  4350. int left = info->alloc;
  4351. struct ksz_desc *desc;
  4352. int received = 0;
  4353. next = info->next;
  4354. while (left--) {
  4355. /* Get next descriptor which is not hardware owned. */
  4356. desc = &info->ring[next];
  4357. status.data = le32_to_cpu(desc->phw->ctrl.data);
  4358. if (status.rx.hw_owned)
  4359. break;
  4360. /* Status valid only when last descriptor bit is set. */
  4361. if (status.rx.last_desc && status.rx.first_desc) {
  4362. if (rx_proc(dev, hw, desc, status))
  4363. goto release_packet;
  4364. received++;
  4365. }
  4366. release_packet:
  4367. release_desc(desc);
  4368. next++;
  4369. next &= info->mask;
  4370. }
  4371. info->next = next;
  4372. return received;
  4373. }
  4374. static int port_rcv_packets(struct dev_info *hw_priv)
  4375. {
  4376. int next;
  4377. union desc_stat status;
  4378. struct ksz_hw *hw = &hw_priv->hw;
  4379. struct net_device *dev = hw->port_info[0].pdev;
  4380. struct ksz_desc_info *info = &hw->rx_desc_info;
  4381. int left = info->alloc;
  4382. struct ksz_desc *desc;
  4383. int received = 0;
  4384. next = info->next;
  4385. while (left--) {
  4386. /* Get next descriptor which is not hardware owned. */
  4387. desc = &info->ring[next];
  4388. status.data = le32_to_cpu(desc->phw->ctrl.data);
  4389. if (status.rx.hw_owned)
  4390. break;
  4391. if (hw->dev_count > 1) {
  4392. /* Get received port number. */
  4393. int p = HW_TO_DEV_PORT(status.rx.src_port);
  4394. dev = hw->port_info[p].pdev;
  4395. if (!netif_running(dev))
  4396. goto release_packet;
  4397. }
  4398. /* Status valid only when last descriptor bit is set. */
  4399. if (status.rx.last_desc && status.rx.first_desc) {
  4400. if (rx_proc(dev, hw, desc, status))
  4401. goto release_packet;
  4402. received++;
  4403. }
  4404. release_packet:
  4405. release_desc(desc);
  4406. next++;
  4407. next &= info->mask;
  4408. }
  4409. info->next = next;
  4410. return received;
  4411. }
  4412. static int dev_rcv_special(struct dev_info *hw_priv)
  4413. {
  4414. int next;
  4415. union desc_stat status;
  4416. struct ksz_hw *hw = &hw_priv->hw;
  4417. struct net_device *dev = hw->port_info[0].pdev;
  4418. struct ksz_desc_info *info = &hw->rx_desc_info;
  4419. int left = info->alloc;
  4420. struct ksz_desc *desc;
  4421. int received = 0;
  4422. next = info->next;
  4423. while (left--) {
  4424. /* Get next descriptor which is not hardware owned. */
  4425. desc = &info->ring[next];
  4426. status.data = le32_to_cpu(desc->phw->ctrl.data);
  4427. if (status.rx.hw_owned)
  4428. break;
  4429. if (hw->dev_count > 1) {
  4430. /* Get received port number. */
  4431. int p = HW_TO_DEV_PORT(status.rx.src_port);
  4432. dev = hw->port_info[p].pdev;
  4433. if (!netif_running(dev))
  4434. goto release_packet;
  4435. }
  4436. /* Status valid only when last descriptor bit is set. */
  4437. if (status.rx.last_desc && status.rx.first_desc) {
  4438. /*
4439. * Accept the frame if there is no receive error, or if the
4440. * only error condition is "frame too long", which is expected
4441. * when huge frame receiving is enabled.
  4442. */
  4443. if (!status.rx.error || (status.data &
  4444. KS_DESC_RX_ERROR_COND) ==
  4445. KS_DESC_RX_ERROR_TOO_LONG) {
  4446. if (rx_proc(dev, hw, desc, status))
  4447. goto release_packet;
  4448. received++;
  4449. } else {
  4450. struct dev_priv *priv = netdev_priv(dev);
  4451. /* Update receive error statistics. */
  4452. priv->port.counter[OID_COUNTER_RCV_ERROR]++;
  4453. }
  4454. }
  4455. release_packet:
  4456. release_desc(desc);
  4457. next++;
  4458. next &= info->mask;
  4459. }
  4460. info->next = next;
  4461. return received;
  4462. }
  4463. static void rx_proc_task(unsigned long data)
  4464. {
  4465. struct dev_info *hw_priv = (struct dev_info *) data;
  4466. struct ksz_hw *hw = &hw_priv->hw;
  4467. if (!hw->enabled)
  4468. return;
  4469. if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
  4470. /* In case receive process is suspended because of overrun. */
  4471. hw_resume_rx(hw);
  4472. /* tasklets are interruptible. */
  4473. spin_lock_irq(&hw_priv->hwlock);
  4474. hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
  4475. spin_unlock_irq(&hw_priv->hwlock);
  4476. } else {
  4477. hw_ack_intr(hw, KS884X_INT_RX);
  4478. tasklet_schedule(&hw_priv->rx_tasklet);
  4479. }
  4480. }
  4481. static void tx_proc_task(unsigned long data)
  4482. {
  4483. struct dev_info *hw_priv = (struct dev_info *) data;
  4484. struct ksz_hw *hw = &hw_priv->hw;
  4485. hw_ack_intr(hw, KS884X_INT_TX_MASK);
  4486. tx_done(hw_priv);
  4487. /* tasklets are interruptible. */
  4488. spin_lock_irq(&hw_priv->hwlock);
  4489. hw_turn_on_intr(hw, KS884X_INT_TX);
  4490. spin_unlock_irq(&hw_priv->hwlock);
  4491. }
  4492. static inline void handle_rx_stop(struct ksz_hw *hw)
  4493. {
4494. /* Receive has just been stopped. */
  4495. if (0 == hw->rx_stop)
  4496. hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
  4497. else if (hw->rx_stop > 1) {
  4498. if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
  4499. hw_start_rx(hw);
  4500. } else {
  4501. hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
  4502. hw->rx_stop = 0;
  4503. }
  4504. } else
4505. /* Receive has just been started. */
  4506. hw->rx_stop++;
  4507. }
  4508. /**
  4509. * netdev_intr - interrupt handling
  4510. * @irq: Interrupt number.
  4511. * @dev_id: Network device.
  4512. *
4513. * This is the interrupt handler for the adapter; it services interrupts raised by the hardware.
  4514. *
  4515. * Return IRQ_HANDLED if interrupt is handled.
  4516. */
  4517. static irqreturn_t netdev_intr(int irq, void *dev_id)
  4518. {
  4519. uint int_enable = 0;
  4520. struct net_device *dev = (struct net_device *) dev_id;
  4521. struct dev_priv *priv = netdev_priv(dev);
  4522. struct dev_info *hw_priv = priv->adapter;
  4523. struct ksz_hw *hw = &hw_priv->hw;
  4524. spin_lock(&hw_priv->hwlock);
  4525. hw_read_intr(hw, &int_enable);
  4526. /* Not our interrupt! */
  4527. if (!int_enable) {
  4528. spin_unlock(&hw_priv->hwlock);
  4529. return IRQ_NONE;
  4530. }
  4531. do {
  4532. hw_ack_intr(hw, int_enable);
  4533. int_enable &= hw->intr_mask;
  4534. if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
  4535. hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
  4536. tasklet_schedule(&hw_priv->tx_tasklet);
  4537. }
  4538. if (likely(int_enable & KS884X_INT_RX)) {
  4539. hw_dis_intr_bit(hw, KS884X_INT_RX);
  4540. tasklet_schedule(&hw_priv->rx_tasklet);
  4541. }
  4542. if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
  4543. dev->stats.rx_fifo_errors++;
  4544. hw_resume_rx(hw);
  4545. }
  4546. if (unlikely(int_enable & KS884X_INT_PHY)) {
  4547. struct ksz_port *port = &priv->port;
  4548. hw->features |= LINK_INT_WORKING;
  4549. port_get_link_speed(port);
  4550. }
  4551. if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
  4552. handle_rx_stop(hw);
  4553. break;
  4554. }
  4555. if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
  4556. u32 data;
  4557. hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
  4558. pr_info("Tx stopped\n");
  4559. data = readl(hw->io + KS_DMA_TX_CTRL);
  4560. if (!(data & DMA_TX_ENABLE))
  4561. pr_info("Tx disabled\n");
  4562. break;
  4563. }
  4564. } while (0);
  4565. hw_ena_intr(hw);
  4566. spin_unlock(&hw_priv->hwlock);
  4567. return IRQ_HANDLED;
  4568. }
  4569. /*
  4570. * Linux network device functions
  4571. */
  4572. static unsigned long next_jiffies;
  4573. #ifdef CONFIG_NET_POLL_CONTROLLER
  4574. static void netdev_netpoll(struct net_device *dev)
  4575. {
  4576. struct dev_priv *priv = netdev_priv(dev);
  4577. struct dev_info *hw_priv = priv->adapter;
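/*
 * Netpoll path: mask the adapter interrupt and call the normal handler
 * directly; netdev_intr() re-enables the interrupt on its normal exit
 * path.
 */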
  4578. hw_dis_intr(&hw_priv->hw);
  4579. netdev_intr(dev->irq, dev);
  4580. }
  4581. #endif
  4582. static void bridge_change(struct ksz_hw *hw)
  4583. {
  4584. int port;
  4585. u8 member;
  4586. struct ksz_switch *sw = hw->ksz_switch;
  4587. /* No ports in forwarding state. */
  4588. if (!sw->member) {
  4589. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
  4590. sw_block_addr(hw);
  4591. }
  4592. for (port = 0; port < SWITCH_PORT_NUM; port++) {
  4593. if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
  4594. member = HOST_MASK | sw->member;
  4595. else
  4596. member = HOST_MASK | (1 << port);
  4597. if (member != sw->port_cfg[port].member)
  4598. sw_cfg_port_base_vlan(hw, port, member);
  4599. }
  4600. }
  4601. /**
  4602. * netdev_close - close network device
  4603. * @dev: Network device.
  4604. *
4605. * This function processes the close operation of the network device. This is
4606. * caused by the user command "ifconfig ethX down."
  4607. *
  4608. * Return 0 if successful; otherwise an error code indicating failure.
  4609. */
  4610. static int netdev_close(struct net_device *dev)
  4611. {
  4612. struct dev_priv *priv = netdev_priv(dev);
  4613. struct dev_info *hw_priv = priv->adapter;
  4614. struct ksz_port *port = &priv->port;
  4615. struct ksz_hw *hw = &hw_priv->hw;
  4616. int pi;
  4617. netif_stop_queue(dev);
  4618. ksz_stop_timer(&priv->monitor_timer_info);
4619. /* Need to shut down the port manually in multiple device interfaces mode. */
  4620. if (hw->dev_count > 1) {
  4621. port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);
  4622. /* Port is closed. Need to change bridge setting. */
  4623. if (hw->features & STP_SUPPORT) {
  4624. pi = 1 << port->first_port;
  4625. if (hw->ksz_switch->member & pi) {
  4626. hw->ksz_switch->member &= ~pi;
  4627. bridge_change(hw);
  4628. }
  4629. }
  4630. }
  4631. if (port->first_port > 0)
  4632. hw_del_addr(hw, dev->dev_addr);
  4633. if (!hw_priv->wol_enable)
  4634. port_set_power_saving(port, true);
  4635. if (priv->multicast)
  4636. --hw->all_multi;
  4637. if (priv->promiscuous)
  4638. --hw->promiscuous;
  4639. hw_priv->opened--;
  4640. if (!(hw_priv->opened)) {
  4641. ksz_stop_timer(&hw_priv->mib_timer_info);
  4642. flush_work(&hw_priv->mib_read);
  4643. hw_dis_intr(hw);
  4644. hw_disable(hw);
  4645. hw_clr_multicast(hw);
  4646. /* Delay for receive task to stop scheduling itself. */
  4647. msleep(2000 / HZ);
  4648. tasklet_kill(&hw_priv->rx_tasklet);
  4649. tasklet_kill(&hw_priv->tx_tasklet);
  4650. free_irq(dev->irq, hw_priv->dev);
  4651. transmit_cleanup(hw_priv, 0);
  4652. hw_reset_pkts(&hw->rx_desc_info);
  4653. hw_reset_pkts(&hw->tx_desc_info);
4654. /* Clean out the static MAC table when the switch is shut down. */
  4655. if (hw->features & STP_SUPPORT)
  4656. sw_clr_sta_mac_table(hw);
  4657. }
  4658. return 0;
  4659. }
  4660. static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
  4661. {
  4662. if (hw->ksz_switch) {
  4663. u32 data;
  4664. data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
  4665. if (hw->features & RX_HUGE_FRAME)
  4666. data |= SWITCH_HUGE_PACKET;
  4667. else
  4668. data &= ~SWITCH_HUGE_PACKET;
  4669. writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
  4670. }
  4671. if (hw->features & RX_HUGE_FRAME) {
  4672. hw->rx_cfg |= DMA_RX_ERROR;
  4673. hw_priv->dev_rcv = dev_rcv_special;
  4674. } else {
  4675. hw->rx_cfg &= ~DMA_RX_ERROR;
  4676. if (hw->dev_count > 1)
  4677. hw_priv->dev_rcv = port_rcv_packets;
  4678. else
  4679. hw_priv->dev_rcv = dev_rcv_packets;
  4680. }
  4681. }
  4682. static int prepare_hardware(struct net_device *dev)
  4683. {
  4684. struct dev_priv *priv = netdev_priv(dev);
  4685. struct dev_info *hw_priv = priv->adapter;
  4686. struct ksz_hw *hw = &hw_priv->hw;
  4687. int rc = 0;
  4688. /* Remember the network device that requests interrupts. */
  4689. hw_priv->dev = dev;
  4690. rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
  4691. if (rc)
  4692. return rc;
  4693. tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
  4694. (unsigned long) hw_priv);
  4695. tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
  4696. (unsigned long) hw_priv);
  4697. hw->promiscuous = 0;
  4698. hw->all_multi = 0;
  4699. hw->multi_list_size = 0;
  4700. hw_reset(hw);
  4701. hw_set_desc_base(hw,
  4702. hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
  4703. hw_set_addr(hw);
  4704. hw_cfg_huge_frame(hw_priv, hw);
  4705. ksz_init_rx_buffers(hw_priv);
  4706. return 0;
  4707. }
  4708. static void set_media_state(struct net_device *dev, int media_state)
  4709. {
  4710. struct dev_priv *priv = netdev_priv(dev);
  4711. if (media_state == priv->media_state)
  4712. netif_carrier_on(dev);
  4713. else
  4714. netif_carrier_off(dev);
  4715. netif_info(priv, link, dev, "link %s\n",
  4716. media_state == priv->media_state ? "on" : "off");
  4717. }
  4718. /**
  4719. * netdev_open - open network device
  4720. * @dev: Network device.
  4721. *
4722. * This function processes the open operation of the network device. This is
4723. * caused by the user command "ifconfig ethX up."
  4724. *
  4725. * Return 0 if successful; otherwise an error code indicating failure.
  4726. */
  4727. static int netdev_open(struct net_device *dev)
  4728. {
  4729. struct dev_priv *priv = netdev_priv(dev);
  4730. struct dev_info *hw_priv = priv->adapter;
  4731. struct ksz_hw *hw = &hw_priv->hw;
  4732. struct ksz_port *port = &priv->port;
  4733. int i;
  4734. int p;
  4735. int rc = 0;
  4736. priv->multicast = 0;
  4737. priv->promiscuous = 0;
  4738. /* Reset device statistics. */
  4739. memset(&dev->stats, 0, sizeof(struct net_device_stats));
  4740. memset((void *) port->counter, 0,
  4741. (sizeof(u64) * OID_COUNTER_LAST));
  4742. if (!(hw_priv->opened)) {
  4743. rc = prepare_hardware(dev);
  4744. if (rc)
  4745. return rc;
  4746. for (i = 0; i < hw->mib_port_cnt; i++) {
  4747. if (next_jiffies < jiffies)
  4748. next_jiffies = jiffies + HZ * 2;
  4749. else
  4750. next_jiffies += HZ * 1;
  4751. hw_priv->counter[i].time = next_jiffies;
  4752. hw->port_mib[i].state = media_disconnected;
  4753. port_init_cnt(hw, i);
  4754. }
  4755. if (hw->ksz_switch)
  4756. hw->port_mib[HOST_PORT].state = media_connected;
  4757. else {
  4758. hw_add_wol_bcast(hw);
  4759. hw_cfg_wol_pme(hw, 0);
  4760. hw_clr_wol_pme_status(&hw_priv->hw);
  4761. }
  4762. }
  4763. port_set_power_saving(port, false);
  4764. for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
  4765. /*
  4766. * Initialize to invalid value so that link detection
  4767. * is done.
  4768. */
  4769. hw->port_info[p].partner = 0xFF;
  4770. hw->port_info[p].state = media_disconnected;
  4771. }
  4772. /* Need to open the port in multiple device interfaces mode. */
  4773. if (hw->dev_count > 1) {
  4774. port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
  4775. if (port->first_port > 0)
  4776. hw_add_addr(hw, dev->dev_addr);
  4777. }
  4778. port_get_link_speed(port);
  4779. if (port->force_link)
  4780. port_force_link_speed(port);
  4781. else
  4782. port_set_link_speed(port);
  4783. if (!(hw_priv->opened)) {
  4784. hw_setup_intr(hw);
  4785. hw_enable(hw);
  4786. hw_ena_intr(hw);
  4787. if (hw->mib_port_cnt)
  4788. ksz_start_timer(&hw_priv->mib_timer_info,
  4789. hw_priv->mib_timer_info.period);
  4790. }
  4791. hw_priv->opened++;
  4792. ksz_start_timer(&priv->monitor_timer_info,
  4793. priv->monitor_timer_info.period);
  4794. priv->media_state = port->linked->state;
  4795. set_media_state(dev, media_connected);
  4796. netif_start_queue(dev);
  4797. return 0;
  4798. }
  4799. /* RX errors = rx_errors */
  4800. /* RX dropped = rx_dropped */
  4801. /* RX overruns = rx_fifo_errors */
  4802. /* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
  4803. /* TX errors = tx_errors */
  4804. /* TX dropped = tx_dropped */
  4805. /* TX overruns = tx_fifo_errors */
  4806. /* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
  4807. /* collisions = collisions */
  4808. /**
  4809. * netdev_query_statistics - query network device statistics
  4810. * @dev: Network device.
  4811. *
  4812. * This function returns the statistics of the network device. The device
4813. * need not be opened.
  4814. *
  4815. * Return network device statistics.
  4816. */
  4817. static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
  4818. {
  4819. struct dev_priv *priv = netdev_priv(dev);
  4820. struct ksz_port *port = &priv->port;
  4821. struct ksz_hw *hw = &priv->adapter->hw;
  4822. struct ksz_port_mib *mib;
  4823. int i;
  4824. int p;
  4825. dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
  4826. dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
  4827. /* Reset to zero to add count later. */
  4828. dev->stats.multicast = 0;
  4829. dev->stats.collisions = 0;
  4830. dev->stats.rx_length_errors = 0;
  4831. dev->stats.rx_crc_errors = 0;
  4832. dev->stats.rx_frame_errors = 0;
  4833. dev->stats.tx_window_errors = 0;
  4834. for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
  4835. mib = &hw->port_mib[p];
  4836. dev->stats.multicast += (unsigned long)
  4837. mib->counter[MIB_COUNTER_RX_MULTICAST];
  4838. dev->stats.collisions += (unsigned long)
  4839. mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
  4840. dev->stats.rx_length_errors += (unsigned long)(
  4841. mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
  4842. mib->counter[MIB_COUNTER_RX_FRAGMENT] +
  4843. mib->counter[MIB_COUNTER_RX_OVERSIZE] +
  4844. mib->counter[MIB_COUNTER_RX_JABBER]);
  4845. dev->stats.rx_crc_errors += (unsigned long)
  4846. mib->counter[MIB_COUNTER_RX_CRC_ERR];
  4847. dev->stats.rx_frame_errors += (unsigned long)(
  4848. mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
  4849. mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
  4850. dev->stats.tx_window_errors += (unsigned long)
  4851. mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
  4852. }
  4853. return &dev->stats;
  4854. }
  4855. /**
  4856. * netdev_set_mac_address - set network device MAC address
  4857. * @dev: Network device.
  4858. * @addr: Buffer of MAC address.
  4859. *
  4860. * This function is used to set the MAC address of the network device.
  4861. *
  4862. * Return 0 to indicate success.
  4863. */
  4864. static int netdev_set_mac_address(struct net_device *dev, void *addr)
  4865. {
  4866. struct dev_priv *priv = netdev_priv(dev);
  4867. struct dev_info *hw_priv = priv->adapter;
  4868. struct ksz_hw *hw = &hw_priv->hw;
  4869. struct sockaddr *mac = addr;
  4870. uint interrupt;
  4871. if (priv->port.first_port > 0)
  4872. hw_del_addr(hw, dev->dev_addr);
  4873. else {
  4874. hw->mac_override = 1;
  4875. memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
  4876. }
  4877. memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
  4878. interrupt = hw_block_intr(hw);
  4879. if (priv->port.first_port > 0)
  4880. hw_add_addr(hw, dev->dev_addr);
  4881. else
  4882. hw_set_addr(hw);
  4883. hw_restore_intr(hw, interrupt);
  4884. return 0;
  4885. }
  4886. static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
  4887. struct ksz_hw *hw, int promiscuous)
  4888. {
  4889. if (promiscuous != priv->promiscuous) {
  4890. u8 prev_state = hw->promiscuous;
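/*
 * hw->promiscuous is a reference count across the per-port network
 * devices sharing this adapter; the hardware setting only needs to
 * change on a 0 <-> 1 transition, which the check below detects.
 */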
  4891. if (promiscuous)
  4892. ++hw->promiscuous;
  4893. else
  4894. --hw->promiscuous;
  4895. priv->promiscuous = promiscuous;
  4896. /* Turn on/off promiscuous mode. */
  4897. if (hw->promiscuous <= 1 && prev_state <= 1)
  4898. hw_set_promiscuous(hw, hw->promiscuous);
  4899. /*
  4900. * Port is not in promiscuous mode, meaning it is released
  4901. * from the bridge.
  4902. */
  4903. if ((hw->features & STP_SUPPORT) && !promiscuous &&
  4904. (dev->priv_flags & IFF_BRIDGE_PORT)) {
  4905. struct ksz_switch *sw = hw->ksz_switch;
  4906. int port = priv->port.first_port;
  4907. port_set_stp_state(hw, port, STP_STATE_DISABLED);
  4908. port = 1 << port;
  4909. if (sw->member & port) {
  4910. sw->member &= ~port;
  4911. bridge_change(hw);
  4912. }
  4913. }
  4914. }
  4915. }
  4916. static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
  4917. int multicast)
  4918. {
  4919. if (multicast != priv->multicast) {
  4920. u8 all_multi = hw->all_multi;
  4921. if (multicast)
  4922. ++hw->all_multi;
  4923. else
  4924. --hw->all_multi;
  4925. priv->multicast = multicast;
  4926. /* Turn on/off all multicast mode. */
  4927. if (hw->all_multi <= 1 && all_multi <= 1)
  4928. hw_set_multicast(hw, hw->all_multi);
  4929. }
  4930. }
  4931. /**
4932. * netdev_set_rx_mode - set receive mode
  4933. * @dev: Network device.
  4934. *
  4935. * This routine is used to set multicast addresses or put the network device
  4936. * into promiscuous mode.
  4937. */
  4938. static void netdev_set_rx_mode(struct net_device *dev)
  4939. {
  4940. struct dev_priv *priv = netdev_priv(dev);
  4941. struct dev_info *hw_priv = priv->adapter;
  4942. struct ksz_hw *hw = &hw_priv->hw;
  4943. struct netdev_hw_addr *ha;
  4944. int multicast = (dev->flags & IFF_ALLMULTI);
  4945. dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
  4946. if (hw_priv->hw.dev_count > 1)
  4947. multicast |= (dev->flags & IFF_MULTICAST);
  4948. dev_set_multicast(priv, hw, multicast);
  4949. /* Cannot use different hashes in multiple device interfaces mode. */
  4950. if (hw_priv->hw.dev_count > 1)
  4951. return;
  4952. if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
  4953. int i = 0;
  4954. /* List too big to support so turn on all multicast mode. */
  4955. if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
  4956. if (MAX_MULTICAST_LIST != hw->multi_list_size) {
  4957. hw->multi_list_size = MAX_MULTICAST_LIST;
  4958. ++hw->all_multi;
  4959. hw_set_multicast(hw, hw->all_multi);
  4960. }
  4961. return;
  4962. }
  4963. netdev_for_each_mc_addr(ha, dev) {
  4964. if (i >= MAX_MULTICAST_LIST)
  4965. break;
  4966. memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
  4967. }
  4968. hw->multi_list_size = (u8) i;
  4969. hw_set_grp_addr(hw);
  4970. } else {
  4971. if (MAX_MULTICAST_LIST == hw->multi_list_size) {
  4972. --hw->all_multi;
  4973. hw_set_multicast(hw, hw->all_multi);
  4974. }
  4975. hw->multi_list_size = 0;
  4976. hw_clr_multicast(hw);
  4977. }
  4978. }
  4979. static int netdev_change_mtu(struct net_device *dev, int new_mtu)
  4980. {
  4981. struct dev_priv *priv = netdev_priv(dev);
  4982. struct dev_info *hw_priv = priv->adapter;
  4983. struct ksz_hw *hw = &hw_priv->hw;
  4984. int hw_mtu;
  4985. if (netif_running(dev))
  4986. return -EBUSY;
  4987. /* Cannot use different MTU in multiple device interfaces mode. */
  4988. if (hw->dev_count > 1)
  4989. if (dev != hw_priv->dev)
  4990. return 0;
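/*
 * The required buffer size is the MTU plus the Ethernet header and the
 * 4-byte FCS; if that exceeds a regular receive buffer, huge frame
 * support is turned on and the maximum buffer size is used, otherwise
 * the regular buffer size is kept.  The result is rounded up to a
 * 4-byte multiple.
 */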
  4991. hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
  4992. if (hw_mtu > REGULAR_RX_BUF_SIZE) {
  4993. hw->features |= RX_HUGE_FRAME;
  4994. hw_mtu = MAX_RX_BUF_SIZE;
  4995. } else {
  4996. hw->features &= ~RX_HUGE_FRAME;
  4997. hw_mtu = REGULAR_RX_BUF_SIZE;
  4998. }
  4999. hw_mtu = (hw_mtu + 3) & ~3;
  5000. hw_priv->mtu = hw_mtu;
  5001. dev->mtu = new_mtu;
  5002. return 0;
  5003. }
  5004. /**
  5005. * netdev_ioctl - I/O control processing
  5006. * @dev: Network device.
  5007. * @ifr: Interface request structure.
  5008. * @cmd: I/O control code.
  5009. *
  5010. * This function is used to process I/O control calls.
  5011. *
5012. * Return 0 if successful; otherwise an error code indicating failure.
  5013. */
  5014. static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  5015. {
  5016. struct dev_priv *priv = netdev_priv(dev);
  5017. struct dev_info *hw_priv = priv->adapter;
  5018. struct ksz_hw *hw = &hw_priv->hw;
  5019. struct ksz_port *port = &priv->port;
  5020. int result = 0;
  5021. struct mii_ioctl_data *data = if_mii(ifr);
  5022. if (down_interruptible(&priv->proc_sem))
  5023. return -ERESTARTSYS;
  5024. switch (cmd) {
  5025. /* Get address of MII PHY in use. */
  5026. case SIOCGMIIPHY:
  5027. data->phy_id = priv->id;
  5028. /* Fallthrough... */
  5029. /* Read MII PHY register. */
  5030. case SIOCGMIIREG:
  5031. if (data->phy_id != priv->id || data->reg_num >= 6)
  5032. result = -EIO;
  5033. else
  5034. hw_r_phy(hw, port->linked->port_id, data->reg_num,
  5035. &data->val_out);
  5036. break;
  5037. /* Write MII PHY register. */
  5038. case SIOCSMIIREG:
  5039. if (!capable(CAP_NET_ADMIN))
  5040. result = -EPERM;
  5041. else if (data->phy_id != priv->id || data->reg_num >= 6)
  5042. result = -EIO;
  5043. else
  5044. hw_w_phy(hw, port->linked->port_id, data->reg_num,
  5045. data->val_in);
  5046. break;
  5047. default:
  5048. result = -EOPNOTSUPP;
  5049. }
  5050. up(&priv->proc_sem);
  5051. return result;
  5052. }
  5053. /*
  5054. * MII support
  5055. */
  5056. /**
  5057. * mdio_read - read PHY register
  5058. * @dev: Network device.
  5059. * @phy_id: The PHY id.
  5060. * @reg_num: The register number.
  5061. *
  5062. * This function returns the PHY register value.
  5063. *
  5064. * Return the register value.
  5065. */
  5066. static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
  5067. {
  5068. struct dev_priv *priv = netdev_priv(dev);
  5069. struct ksz_port *port = &priv->port;
  5070. struct ksz_hw *hw = port->hw;
  5071. u16 val_out;
  5072. hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
  5073. return val_out;
  5074. }
  5075. /**
  5076. * mdio_write - set PHY register
  5077. * @dev: Network device.
  5078. * @phy_id: The PHY id.
  5079. * @reg_num: The register number.
  5080. * @val: The register value.
  5081. *
  5082. * This procedure sets the PHY register value.
  5083. */
  5084. static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
  5085. {
  5086. struct dev_priv *priv = netdev_priv(dev);
  5087. struct ksz_port *port = &priv->port;
  5088. struct ksz_hw *hw = port->hw;
  5089. int i;
  5090. int pi;
  5091. for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
  5092. hw_w_phy(hw, pi, reg_num << 1, val);
  5093. }
  5094. /*
  5095. * ethtool support
  5096. */
  5097. #define EEPROM_SIZE 0x40
  5098. static u16 eeprom_data[EEPROM_SIZE] = { 0 };
  5099. #define ADVERTISED_ALL \
  5100. (ADVERTISED_10baseT_Half | \
  5101. ADVERTISED_10baseT_Full | \
  5102. ADVERTISED_100baseT_Half | \
  5103. ADVERTISED_100baseT_Full)
  5104. /* These functions use the MII functions in mii.c. */
  5105. /**
  5106. * netdev_get_link_ksettings - get network device settings
  5107. * @dev: Network device.
  5108. * @cmd: Ethtool command.
  5109. *
  5110. * This function queries the PHY and returns its state in the ethtool command.
  5111. *
  5112. * Return 0 if successful; otherwise an error code.
  5113. */
  5114. static int netdev_get_link_ksettings(struct net_device *dev,
  5115. struct ethtool_link_ksettings *cmd)
  5116. {
  5117. struct dev_priv *priv = netdev_priv(dev);
  5118. struct dev_info *hw_priv = priv->adapter;
  5119. mutex_lock(&hw_priv->lock);
  5120. mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
  5121. ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
  5122. mutex_unlock(&hw_priv->lock);
  5123. /* Save advertised settings for workaround in next function. */
  5124. ethtool_convert_link_mode_to_legacy_u32(&priv->advertising,
  5125. cmd->link_modes.advertising);
  5126. return 0;
  5127. }
  5128. /**
  5129. * netdev_set_link_ksettings - set network device settings
  5130. * @dev: Network device.
  5131. * @cmd: Ethtool command.
  5132. *
  5133. * This function sets the PHY according to the ethtool command.
  5134. *
  5135. * Return 0 if successful; otherwise an error code.
  5136. */
  5137. static int netdev_set_link_ksettings(struct net_device *dev,
  5138. const struct ethtool_link_ksettings *cmd)
  5139. {
  5140. struct dev_priv *priv = netdev_priv(dev);
  5141. struct dev_info *hw_priv = priv->adapter;
  5142. struct ksz_port *port = &priv->port;
  5143. struct ethtool_link_ksettings copy_cmd;
  5144. u32 speed = cmd->base.speed;
  5145. u32 advertising;
  5146. int rc;
  5147. ethtool_convert_link_mode_to_legacy_u32(&advertising,
  5148. cmd->link_modes.advertising);
  5149. /*
  5150. * ethtool utility does not change advertised setting if auto
  5151. * negotiation is not specified explicitly.
  5152. */
  5153. if (cmd->base.autoneg && priv->advertising == advertising) {
  5154. advertising |= ADVERTISED_ALL;
  5155. if (10 == speed)
  5156. advertising &=
  5157. ~(ADVERTISED_100baseT_Full |
  5158. ADVERTISED_100baseT_Half);
  5159. else if (100 == speed)
  5160. advertising &=
  5161. ~(ADVERTISED_10baseT_Full |
  5162. ADVERTISED_10baseT_Half);
  5163. if (0 == cmd->base.duplex)
  5164. advertising &=
  5165. ~(ADVERTISED_100baseT_Full |
  5166. ADVERTISED_10baseT_Full);
  5167. else if (1 == cmd->base.duplex)
  5168. advertising &=
  5169. ~(ADVERTISED_100baseT_Half |
  5170. ADVERTISED_10baseT_Half);
  5171. }
  5172. mutex_lock(&hw_priv->lock);
  5173. if (cmd->base.autoneg &&
  5174. (advertising & ADVERTISED_ALL) == ADVERTISED_ALL) {
  5175. port->duplex = 0;
  5176. port->speed = 0;
  5177. port->force_link = 0;
  5178. } else {
  5179. port->duplex = cmd->base.duplex + 1;
  5180. if (1000 != speed)
  5181. port->speed = speed;
  5182. if (cmd->base.autoneg)
  5183. port->force_link = 0;
  5184. else
  5185. port->force_link = 1;
  5186. }
  5187. memcpy(&copy_cmd, cmd, sizeof(copy_cmd));
  5188. ethtool_convert_legacy_u32_to_link_mode(copy_cmd.link_modes.advertising,
  5189. advertising);
  5190. rc = mii_ethtool_set_link_ksettings(
  5191. &priv->mii_if,
  5192. (const struct ethtool_link_ksettings *)&copy_cmd);
  5193. mutex_unlock(&hw_priv->lock);
  5194. return rc;
  5195. }
  5196. /**
  5197. * netdev_nway_reset - restart auto-negotiation
  5198. * @dev: Network device.
  5199. *
  5200. * This function restarts the PHY for auto-negotiation.
  5201. *
  5202. * Return 0 if successful; otherwise an error code.
  5203. */
  5204. static int netdev_nway_reset(struct net_device *dev)
  5205. {
  5206. struct dev_priv *priv = netdev_priv(dev);
  5207. struct dev_info *hw_priv = priv->adapter;
  5208. int rc;
  5209. mutex_lock(&hw_priv->lock);
  5210. rc = mii_nway_restart(&priv->mii_if);
  5211. mutex_unlock(&hw_priv->lock);
  5212. return rc;
  5213. }
  5214. /**
  5215. * netdev_get_link - get network device link status
  5216. * @dev: Network device.
  5217. *
  5218. * This function gets the link status from the PHY.
  5219. *
  5220. * Return true if PHY is linked and false otherwise.
  5221. */
  5222. static u32 netdev_get_link(struct net_device *dev)
  5223. {
  5224. struct dev_priv *priv = netdev_priv(dev);
  5225. int rc;
  5226. rc = mii_link_ok(&priv->mii_if);
  5227. return rc;
  5228. }
  5229. /**
  5230. * netdev_get_drvinfo - get network driver information
  5231. * @dev: Network device.
  5232. * @info: Ethtool driver info data structure.
  5233. *
  5234. * This procedure returns the driver information.
  5235. */
  5236. static void netdev_get_drvinfo(struct net_device *dev,
  5237. struct ethtool_drvinfo *info)
  5238. {
  5239. struct dev_priv *priv = netdev_priv(dev);
  5240. struct dev_info *hw_priv = priv->adapter;
  5241. strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
  5242. strlcpy(info->version, DRV_VERSION, sizeof(info->version));
  5243. strlcpy(info->bus_info, pci_name(hw_priv->pdev),
  5244. sizeof(info->bus_info));
  5245. }
  5246. /**
  5247. * netdev_get_regs_len - get length of register dump
  5248. * @dev: Network device.
  5249. *
  5250. * This function returns the length of the register dump.
  5251. *
  5252. * Return length of the register dump.
  5253. */
  5254. static struct hw_regs {
  5255. int start;
  5256. int end;
  5257. } hw_regs_range[] = {
  5258. { KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
  5259. { KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
  5260. { KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
  5261. { KS884X_SIDER_P, KS8842_SGCR7_P },
  5262. { KS8842_MACAR1_P, KS8842_TOSR8_P },
  5263. { KS884X_P1MBCR_P, KS8842_P3ERCR_P },
  5264. { 0, 0 }
  5265. };
  5266. static int netdev_get_regs_len(struct net_device *dev)
  5267. {
  5268. struct hw_regs *range = hw_regs_range;
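/*
 * Start with the 16 dwords (64 bytes) of PCI configuration space that
 * netdev_get_regs() dumps before the register ranges.
 */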
  5269. int regs_len = 0x10 * sizeof(u32);
  5270. while (range->end > range->start) {
  5271. regs_len += (range->end - range->start + 3) / 4 * 4;
  5272. range++;
  5273. }
  5274. return regs_len;
  5275. }
  5276. /**
  5277. * netdev_get_regs - get register dump
  5278. * @dev: Network device.
  5279. * @regs: Ethtool registers data structure.
  5280. * @ptr: Buffer to store the register values.
  5281. *
  5282. * This procedure dumps the register values in the provided buffer.
  5283. */
  5284. static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
  5285. void *ptr)
  5286. {
  5287. struct dev_priv *priv = netdev_priv(dev);
  5288. struct dev_info *hw_priv = priv->adapter;
  5289. struct ksz_hw *hw = &hw_priv->hw;
  5290. int *buf = (int *) ptr;
  5291. struct hw_regs *range = hw_regs_range;
  5292. int len;
  5293. mutex_lock(&hw_priv->lock);
  5294. regs->version = 0;
  5295. for (len = 0; len < 0x40; len += 4) {
  5296. pci_read_config_dword(hw_priv->pdev, len, buf);
  5297. buf++;
  5298. }
  5299. while (range->end > range->start) {
  5300. for (len = range->start; len < range->end; len += 4) {
  5301. *buf = readl(hw->io + len);
  5302. buf++;
  5303. }
  5304. range++;
  5305. }
  5306. mutex_unlock(&hw_priv->lock);
  5307. }
  5308. #define WOL_SUPPORT \
  5309. (WAKE_PHY | WAKE_MAGIC | \
  5310. WAKE_UCAST | WAKE_MCAST | \
  5311. WAKE_BCAST | WAKE_ARP)
  5312. /**
  5313. * netdev_get_wol - get Wake-on-LAN support
  5314. * @dev: Network device.
  5315. * @wol: Ethtool Wake-on-LAN data structure.
  5316. *
  5317. * This procedure returns Wake-on-LAN support.
  5318. */
  5319. static void netdev_get_wol(struct net_device *dev,
  5320. struct ethtool_wolinfo *wol)
  5321. {
  5322. struct dev_priv *priv = netdev_priv(dev);
  5323. struct dev_info *hw_priv = priv->adapter;
  5324. wol->supported = hw_priv->wol_support;
  5325. wol->wolopts = hw_priv->wol_enable;
  5326. memset(&wol->sopass, 0, sizeof(wol->sopass));
  5327. }
  5328. /**
  5329. * netdev_set_wol - set Wake-on-LAN support
  5330. * @dev: Network device.
  5331. * @wol: Ethtool Wake-on-LAN data structure.
  5332. *
  5333. * This function sets Wake-on-LAN support.
  5334. *
  5335. * Return 0 if successful; otherwise an error code.
  5336. */
  5337. static int netdev_set_wol(struct net_device *dev,
  5338. struct ethtool_wolinfo *wol)
  5339. {
  5340. struct dev_priv *priv = netdev_priv(dev);
  5341. struct dev_info *hw_priv = priv->adapter;
  5342. /* Need to find a way to retrieve the device IP address. */
  5343. static const u8 net_addr[] = { 192, 168, 1, 1 };
  5344. if (wol->wolopts & ~hw_priv->wol_support)
  5345. return -EINVAL;
  5346. hw_priv->wol_enable = wol->wolopts;
  5347. /* Link wakeup cannot really be disabled. */
  5348. if (wol->wolopts)
  5349. hw_priv->wol_enable |= WAKE_PHY;
  5350. hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
  5351. return 0;
  5352. }
  5353. /**
  5354. * netdev_get_msglevel - get debug message level
  5355. * @dev: Network device.
  5356. *
  5357. * This function returns current debug message level.
  5358. *
  5359. * Return current debug message flags.
  5360. */
  5361. static u32 netdev_get_msglevel(struct net_device *dev)
  5362. {
  5363. struct dev_priv *priv = netdev_priv(dev);
  5364. return priv->msg_enable;
  5365. }
  5366. /**
  5367. * netdev_set_msglevel - set debug message level
  5368. * @dev: Network device.
  5369. * @value: Debug message flags.
  5370. *
  5371. * This procedure sets debug message level.
  5372. */
  5373. static void netdev_set_msglevel(struct net_device *dev, u32 value)
  5374. {
  5375. struct dev_priv *priv = netdev_priv(dev);
  5376. priv->msg_enable = value;
  5377. }
  5378. /**
  5379. * netdev_get_eeprom_len - get EEPROM length
  5380. * @dev: Network device.
  5381. *
  5382. * This function returns the length of the EEPROM.
  5383. *
  5384. * Return length of the EEPROM.
  5385. */
  5386. static int netdev_get_eeprom_len(struct net_device *dev)
  5387. {
  5388. return EEPROM_SIZE * 2;
  5389. }
  5390. /**
  5391. * netdev_get_eeprom - get EEPROM data
  5392. * @dev: Network device.
  5393. * @eeprom: Ethtool EEPROM data structure.
  5394. * @data: Buffer to store the EEPROM data.
  5395. *
  5396. * This function dumps the EEPROM data in the provided buffer.
  5397. *
  5398. * Return 0 if successful; otherwise an error code.
  5399. */
  5400. #define EEPROM_MAGIC 0x10A18842
  5401. static int netdev_get_eeprom(struct net_device *dev,
  5402. struct ethtool_eeprom *eeprom, u8 *data)
  5403. {
  5404. struct dev_priv *priv = netdev_priv(dev);
  5405. struct dev_info *hw_priv = priv->adapter;
  5406. u8 *eeprom_byte = (u8 *) eeprom_data;
  5407. int i;
  5408. int len;
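/*
 * The EEPROM is read in 16-bit words, so round the requested byte range
 * up to whole words before copying the caller's slice out of the word
 * array.
 */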
  5409. len = (eeprom->offset + eeprom->len + 1) / 2;
  5410. for (i = eeprom->offset / 2; i < len; i++)
  5411. eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
  5412. eeprom->magic = EEPROM_MAGIC;
  5413. memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
  5414. return 0;
  5415. }
  5416. /**
  5417. * netdev_set_eeprom - write EEPROM data
  5418. * @dev: Network device.
  5419. * @eeprom: Ethtool EEPROM data structure.
  5420. * @data: Data buffer.
  5421. *
5422. * This function modifies the EEPROM data, writing back only the 16-bit words that changed.
  5423. *
  5424. * Return 0 if successful; otherwise an error code.
  5425. */
  5426. static int netdev_set_eeprom(struct net_device *dev,
  5427. struct ethtool_eeprom *eeprom, u8 *data)
  5428. {
  5429. struct dev_priv *priv = netdev_priv(dev);
  5430. struct dev_info *hw_priv = priv->adapter;
  5431. u16 eeprom_word[EEPROM_SIZE];
  5432. u8 *eeprom_byte = (u8 *) eeprom_word;
  5433. int i;
  5434. int len;
  5435. if (eeprom->magic != EEPROM_MAGIC)
  5436. return -EINVAL;
  5437. len = (eeprom->offset + eeprom->len + 1) / 2;
  5438. for (i = eeprom->offset / 2; i < len; i++)
  5439. eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
  5440. memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
  5441. memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
  5442. for (i = 0; i < EEPROM_SIZE; i++)
  5443. if (eeprom_word[i] != eeprom_data[i]) {
  5444. eeprom_data[i] = eeprom_word[i];
  5445. eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
  5446. }
  5447. return 0;
  5448. }
  5449. /**
  5450. * netdev_get_pauseparam - get flow control parameters
  5451. * @dev: Network device.
  5452. * @pause: Ethtool PAUSE settings data structure.
  5453. *
5454. * This procedure returns the PAUSE flow control settings.
  5455. */
  5456. static void netdev_get_pauseparam(struct net_device *dev,
  5457. struct ethtool_pauseparam *pause)
  5458. {
  5459. struct dev_priv *priv = netdev_priv(dev);
  5460. struct dev_info *hw_priv = priv->adapter;
  5461. struct ksz_hw *hw = &hw_priv->hw;
  5462. pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
  5463. if (!hw->ksz_switch) {
  5464. pause->rx_pause =
  5465. (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
  5466. pause->tx_pause =
  5467. (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
  5468. } else {
  5469. pause->rx_pause =
  5470. (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5471. SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
  5472. pause->tx_pause =
  5473. (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5474. SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
  5475. }
  5476. }
  5477. /**
  5478. * netdev_set_pauseparam - set flow control parameters
  5479. * @dev: Network device.
  5480. * @pause: Ethtool PAUSE settings data structure.
  5481. *
5482. * This function sets the PAUSE flow control settings.
  5484. *
  5485. * Return 0 if successful; otherwise an error code.
  5486. */
  5487. static int netdev_set_pauseparam(struct net_device *dev,
  5488. struct ethtool_pauseparam *pause)
  5489. {
  5490. struct dev_priv *priv = netdev_priv(dev);
  5491. struct dev_info *hw_priv = priv->adapter;
  5492. struct ksz_hw *hw = &hw_priv->hw;
  5493. struct ksz_port *port = &priv->port;
  5494. mutex_lock(&hw_priv->lock);
  5495. if (pause->autoneg) {
  5496. if (!pause->rx_pause && !pause->tx_pause)
  5497. port->flow_ctrl = PHY_NO_FLOW_CTRL;
  5498. else
  5499. port->flow_ctrl = PHY_FLOW_CTRL;
  5500. hw->overrides &= ~PAUSE_FLOW_CTRL;
  5501. port->force_link = 0;
  5502. if (hw->ksz_switch) {
  5503. sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5504. SWITCH_RX_FLOW_CTRL, 1);
  5505. sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5506. SWITCH_TX_FLOW_CTRL, 1);
  5507. }
  5508. port_set_link_speed(port);
  5509. } else {
  5510. hw->overrides |= PAUSE_FLOW_CTRL;
  5511. if (hw->ksz_switch) {
  5512. sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5513. SWITCH_RX_FLOW_CTRL, pause->rx_pause);
  5514. sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5515. SWITCH_TX_FLOW_CTRL, pause->tx_pause);
  5516. } else
  5517. set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
  5518. }
  5519. mutex_unlock(&hw_priv->lock);
  5520. return 0;
  5521. }
  5522. /**
  5523. * netdev_get_ringparam - get tx/rx ring parameters
  5524. * @dev: Network device.
5525. * @ring: Ethtool RING settings data structure.
  5526. *
  5527. * This procedure returns the TX/RX ring settings.
  5528. */
  5529. static void netdev_get_ringparam(struct net_device *dev,
  5530. struct ethtool_ringparam *ring)
  5531. {
  5532. struct dev_priv *priv = netdev_priv(dev);
  5533. struct dev_info *hw_priv = priv->adapter;
  5534. struct ksz_hw *hw = &hw_priv->hw;
  5535. ring->tx_max_pending = (1 << 9);
  5536. ring->tx_pending = hw->tx_desc_info.alloc;
  5537. ring->rx_max_pending = (1 << 9);
  5538. ring->rx_pending = hw->rx_desc_info.alloc;
  5539. }

#define STATS_LEN	(TOTAL_PORT_COUNTER_NUM)

static struct {
	char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[STATS_LEN] = {
	{ "rx_lo_priority_octets" },
	{ "rx_hi_priority_octets" },
	{ "rx_undersize_packets" },
	{ "rx_fragments" },
	{ "rx_oversize_packets" },
	{ "rx_jabbers" },
	{ "rx_symbol_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "rx_mac_ctrl_packets" },
	{ "rx_pause_packets" },
	{ "rx_bcast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_ucast_packets" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "tx_lo_priority_octets" },
	{ "tx_hi_priority_octets" },
	{ "tx_late_collisions" },
	{ "tx_pause_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_deferred" },
	{ "tx_total_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "rx_discards" },
	{ "tx_discards" },
};

/**
 * netdev_get_strings - get statistics identity strings
 * @dev: Network device.
 * @stringset: String set identifier.
 * @buf: Buffer to store the strings.
 *
 * This procedure returns the strings used to identify the statistics.
 */
static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	if (ETH_SS_STATS == stringset)
		memcpy(buf, &ethtool_stats_keys,
			ETH_GSTRING_LEN * hw->mib_cnt);
}

/**
 * netdev_get_sset_count - get statistics size
 * @dev: Network device.
 * @sset: The statistics set number.
 *
 * This function returns the size of the statistics to be reported.
 *
 * Return size of the statistics to be reported.
 */
static int netdev_get_sset_count(struct net_device *dev, int sset)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	switch (sset) {
	case ETH_SS_STATS:
		return hw->mib_cnt;
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * netdev_get_ethtool_stats - get network device statistics
 * @dev: Network device.
 * @stats: Ethtool statistics data structure.
 * @data: Buffer to store the statistics.
 *
 * This procedure returns the statistics.
 */
static void netdev_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *stats, u64 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int n_stats = stats->n_stats;
	int i;
	int n;
	int p;
	int rc;
	u64 counter[TOTAL_PORT_COUNTER_NUM];

	mutex_lock(&hw_priv->lock);
	n = SWITCH_PORT_NUM;
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		if (media_connected == hw->port_mib[p].state) {
			hw_priv->counter[p].read = 1;

			/* Remember first port that requests read. */
			if (n == SWITCH_PORT_NUM)
				n = p;
		}
	}
	mutex_unlock(&hw_priv->lock);

	if (n < SWITCH_PORT_NUM)
		schedule_work(&hw_priv->mib_read);

	if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
		p = n;
		rc = wait_event_interruptible_timeout(
			hw_priv->counter[p].counter,
			2 == hw_priv->counter[p].read,
			HZ * 1);
	} else
		for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
			if (0 == i) {
				rc = wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 2);
			} else if (hw->port_mib[p].cnt_ptr) {
				rc = wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 1);
			}
		}

	get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
	n = hw->mib_cnt;
	if (n > n_stats)
		n = n_stats;
	n_stats -= n;
	for (i = 0; i < n; i++)
		*data++ = counter[i];
}
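
/*
 * Example (illustrative): the counters above are what userspace sees with
 *
 *	ethtool -S eth0
 *
 * Each request marks the connected ports' MIB counters for reading and then
 * waits, with a timeout, for the mib_read work item to finish collecting
 * them before copying the totals out.
 */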

/**
 * netdev_set_features - set receive checksum support
 * @dev: Network device.
 * @features: New device features (offloads).
 *
 * This function sets receive checksum support setting.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	mutex_lock(&hw_priv->lock);

	/* see note in hw_setup() */
	if (features & NETIF_F_RXCSUM)
		hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
	else
		hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	if (hw->enabled)
		writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

	mutex_unlock(&hw_priv->lock);

	return 0;
}
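
/*
 * Example (illustrative): receive checksum offload can be toggled from
 * userspace with
 *
 *	ethtool -K eth0 rx on
 *	ethtool -K eth0 rx off
 *
 * which reaches this function through the .ndo_set_features callback.
 */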

static const struct ethtool_ops netdev_ethtool_ops = {
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_drvinfo = netdev_get_drvinfo,
	.get_regs_len = netdev_get_regs_len,
	.get_regs = netdev_get_regs,
	.get_wol = netdev_get_wol,
	.set_wol = netdev_set_wol,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.get_eeprom_len = netdev_get_eeprom_len,
	.get_eeprom = netdev_get_eeprom,
	.set_eeprom = netdev_set_eeprom,
	.get_pauseparam = netdev_get_pauseparam,
	.set_pauseparam = netdev_set_pauseparam,
	.get_ringparam = netdev_get_ringparam,
	.get_strings = netdev_get_strings,
	.get_sset_count = netdev_get_sset_count,
	.get_ethtool_stats = netdev_get_ethtool_stats,
	.get_link_ksettings = netdev_get_link_ksettings,
	.set_link_ksettings = netdev_set_link_ksettings,
};

/*
 * Hardware monitoring
 */

static void update_link(struct net_device *dev, struct dev_priv *priv,
	struct ksz_port *port)
{
	if (priv->media_state != port->linked->state) {
		priv->media_state = port->linked->state;
		if (netif_running(dev))
			set_media_state(dev, media_connected);
	}
}
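
/*
 * MIB counter read handshake (descriptive note): counter[i].read moves from
 * 0 (idle) to 1 when a read is requested, either periodically here or from
 * netdev_get_ethtool_stats(), and to 2 once port_r_cnt() has drained the
 * port's counters, at which point any waiter on counter[i].counter is woken.
 */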
static void mib_read_work(struct work_struct *work)
{
	struct dev_info *hw_priv =
		container_of(work, struct dev_info, mib_read);
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port_mib *mib;
	int i;

	next_jiffies = jiffies;
	for (i = 0; i < hw->mib_port_cnt; i++) {
		mib = &hw->port_mib[i];

		/* Reading MIB counters or requested to read. */
		if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {

			/* Need to process receive interrupt. */
			if (port_r_cnt(hw, i))
				break;
			hw_priv->counter[i].read = 0;

			/* Finish reading counters. */
			if (0 == mib->cnt_ptr) {
				hw_priv->counter[i].read = 2;
				wake_up_interruptible(
					&hw_priv->counter[i].counter);
			}
		} else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
			/* Only read MIB counters when the port is connected. */
			if (media_connected == mib->state)
				hw_priv->counter[i].read = 1;
			next_jiffies += HZ * 1 * hw->mib_port_cnt;
			hw_priv->counter[i].time = next_jiffies;

		/* Port is just disconnected. */
		} else if (mib->link_down) {
			mib->link_down = 0;

			/* Read counters one last time after link is lost. */
			hw_priv->counter[i].read = 1;
		}
	}
}

static void mib_monitor(struct timer_list *t)
{
	struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);

	mib_read_work(&hw_priv->mib_read);

	/* This is used to verify Wake-on-LAN is working. */
	if (hw_priv->pme_wait) {
		if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
			hw_clr_wol_pme_status(&hw_priv->hw);
			hw_priv->pme_wait = 0;
		}
	} else if (hw_chk_wol_pme_status(&hw_priv->hw)) {

		/* PME is asserted.  Wait 2 seconds to clear it. */
		hw_priv->pme_wait = jiffies + HZ * 2;
	}

	ksz_update_timer(&hw_priv->mib_timer_info);
}

/**
 * dev_monitor - periodic monitoring
 * @t: Timer list entry used to locate the device private data.
 *
 * This routine is run in a kernel timer to monitor the network device.
 */
static void dev_monitor(struct timer_list *t)
{
	struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer);
	struct net_device *dev = priv->mii_if.dev;
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	if (!(hw->features & LINK_INT_WORKING))
		port_get_link_speed(port);
	update_link(dev, priv, port);

	ksz_update_timer(&priv->monitor_timer_info);
}

/*
 * Linux network device interface functions
 */

/* Driver exported variables */

static int msg_enable;

static char *macaddr = ":";
static char *mac1addr = ":";

/*
 * This enables multiple network device mode for the KSZ8842, which contains
 * a switch with two physical ports.  Some users like to take control of the
 * ports for running Spanning Tree Protocol.  The driver will create an
 * additional eth? device for the other port.
 * (See the usage example after these parameter declarations.)
 *
 * One limitation is that the network devices cannot have different MTU or
 * multicast hash tables.
 */
static int multi_dev;

/*
 * As most users select multiple network device mode to use Spanning Tree
 * Protocol, this enables a feature in which most unicast and multicast
 * packets are forwarded inside the switch and not passed to the host.  Only
 * packets that need the host's attention are passed to it.  This prevents
 * the host from wasting CPU time examining each and every incoming packet
 * and doing the forwarding itself.
 *
 * As the hack requires the private bridge header, the driver cannot compile
 * with just the kernel headers.
 *
 * Enabling STP support also turns on multiple network device mode.
 */
static int stp;

/*
 * This enables fast aging in the KSZ8842 switch.  It is unclear which
 * situation needs it; however, fast aging is used to flush the dynamic MAC
 * table when STP support is enabled.
 */
static int fast_aging;
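
/*
 * Module parameter example (illustrative, assuming the module is built as
 * "ksz884x"):
 *
 *	modprobe ksz884x multi_dev=1 stp=1 fast_aging=1
 *
 * creates one network device per switch port and turns on STP support and
 * fast aging.
 */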

/**
 * netdev_init - initialize network device.
 * @dev: Network device.
 *
 * This function initializes the network device.
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int __init netdev_init(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);

	/* 500 ms timeout */
	ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
		dev_monitor);

	/* 500 ms timeout */
	dev->watchdog_timeo = HZ / 2;

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;

	/*
	 * Hardware does not really support IPv6 checksum generation, but
	 * the driver actually runs faster with this on.
	 */
	dev->hw_features |= NETIF_F_IPV6_CSUM;

	dev->features |= dev->hw_features;

	sema_init(&priv->proc_sem, 1);

	priv->mii_if.phy_id_mask = 0x1;
	priv->mii_if.reg_num_mask = 0x7;
	priv->mii_if.dev = dev;
	priv->mii_if.mdio_read = mdio_read;
	priv->mii_if.mdio_write = mdio_write;
	priv->mii_if.phy_id = priv->port.first_port + 1;

	priv->msg_enable = netif_msg_init(msg_enable,
		(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_init = netdev_init,
	.ndo_open = netdev_open,
	.ndo_stop = netdev_close,
	.ndo_get_stats = netdev_query_statistics,
	.ndo_start_xmit = netdev_tx,
	.ndo_tx_timeout = netdev_tx_timeout,
	.ndo_change_mtu = netdev_change_mtu,
	.ndo_set_features = netdev_set_features,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = netdev_ioctl,
	.ndo_set_rx_mode = netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = netdev_netpoll,
#endif
};

static void netdev_free(struct net_device *dev)
{
	if (dev->watchdog_timeo)
		unregister_netdev(dev);

	free_netdev(dev);
}

struct platform_info {
	struct dev_info dev_info;
	struct net_device *netdev[SWITCH_PORT_NUM];
};

static int net_device_present;

static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
{
	int i;
	int j;
	int got_num;
	int num;

	i = j = num = got_num = 0;
	while (j < ETH_ALEN) {
		if (macaddr[i]) {
			int digit;

			got_num = 1;
			digit = hex_to_bin(macaddr[i]);
			if (digit >= 0)
				num = num * 16 + digit;
			else if (':' == macaddr[i])
				got_num = 2;
			else
				break;
		} else if (got_num)
			got_num = 2;
		else
			break;
		if (2 == got_num) {
			if (MAIN_PORT == port) {
				hw_priv->hw.override_addr[j++] = (u8) num;
				hw_priv->hw.override_addr[5] +=
					hw_priv->hw.id;
			} else {
				hw_priv->hw.ksz_switch->other_addr[j++] =
					(u8) num;
				hw_priv->hw.ksz_switch->other_addr[5] +=
					hw_priv->hw.id;
			}
			num = got_num = 0;
		}
		i++;
	}
	if (ETH_ALEN == j) {
		if (MAIN_PORT == port)
			hw_priv->hw.mac_override = 1;
	}
}
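
/*
 * Example (illustrative): the parser above accepts the usual colon-separated
 * hexadecimal form passed through the "macaddr" and "mac1addr" module
 * parameters, e.g. (module name assumed to be "ksz884x", address arbitrary)
 *
 *	modprobe ksz884x macaddr=00:10:A1:01:02:03
 *
 * The default value ":" means no override is applied.
 */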

#define KS884X_DMA_MASK	(~0x0UL)

static void read_other_addr(struct ksz_hw *hw)
{
	int i;
	u16 data[3];
	struct ksz_switch *sw = hw->ksz_switch;

	for (i = 0; i < 3; i++)
		data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
	if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
		sw->other_addr[5] = (u8) data[0];
		sw->other_addr[4] = (u8)(data[0] >> 8);
		sw->other_addr[3] = (u8) data[1];
		sw->other_addr[2] = (u8)(data[1] >> 8);
		sw->other_addr[1] = (u8) data[2];
		sw->other_addr[0] = (u8)(data[2] >> 8);
	}
}

#ifndef PCI_VENDOR_ID_MICREL_KS
#define PCI_VENDOR_ID_MICREL_KS	0x16c6
#endif

static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct dev_priv *priv;
	struct dev_info *hw_priv;
	struct ksz_hw *hw;
	struct platform_info *info;
	struct ksz_port *port;
	unsigned long reg_base;
	unsigned long reg_len;
	int cnt;
	int i;
	int mib_port_count;
	int pi;
	int port_count;
	int result;
	char banner[sizeof(version)];
	struct ksz_switch *sw = NULL;

	result = pci_enable_device(pdev);
	if (result)
		return result;

	result = -ENODEV;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return result;

	reg_base = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);
	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
		return result;

	if (!request_mem_region(reg_base, reg_len, DRV_NAME))
		return result;
	pci_set_master(pdev);

	result = -ENOMEM;

	info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
	if (!info)
		goto pcidev_init_dev_err;

	hw_priv = &info->dev_info;
	hw_priv->pdev = pdev;

	hw = &hw_priv->hw;

	hw->io = ioremap(reg_base, reg_len);
	if (!hw->io)
		goto pcidev_init_io_err;

	cnt = hw_init(hw);
	if (!cnt) {
		if (msg_enable & NETIF_MSG_PROBE)
			pr_alert("chip not detected\n");
		result = -ENODEV;
		goto pcidev_init_alloc_err;
	}

	snprintf(banner, sizeof(banner), "%s", version);
	banner[13] = cnt + '0';		/* Replace x in "Micrel KSZ884x" */
	dev_info(&hw_priv->pdev->dev, "%s\n", banner);
	dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);

	/* Assume device is KSZ8841. */
	hw->dev_count = 1;
	port_count = 1;
	mib_port_count = 1;
	hw->addr_list_size = 0;
	hw->mib_cnt = PORT_COUNTER_NUM;
	hw->mib_port_cnt = 1;

	/* KSZ8842 has a switch with multiple ports. */
	if (2 == cnt) {
		if (fast_aging)
			hw->overrides |= FAST_AGING;

		hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;

		/* Multiple network device interfaces are required. */
		if (multi_dev) {
			hw->dev_count = SWITCH_PORT_NUM;
			hw->addr_list_size = SWITCH_PORT_NUM - 1;
		}

		/* Single network device has multiple ports. */
		if (1 == hw->dev_count) {
			port_count = SWITCH_PORT_NUM;
			mib_port_count = SWITCH_PORT_NUM;
		}
		hw->mib_port_cnt = TOTAL_PORT_NUM;
		hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
		if (!hw->ksz_switch)
			goto pcidev_init_alloc_err;

		sw = hw->ksz_switch;
	}
	for (i = 0; i < hw->mib_port_cnt; i++)
		hw->port_mib[i].mib_start = 0;

	hw->parent = hw_priv;

	/* Default MTU is 1500. */
	hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;

	if (ksz_alloc_mem(hw_priv))
		goto pcidev_init_mem_err;

	hw_priv->hw.id = net_device_present;

	spin_lock_init(&hw_priv->hwlock);
	mutex_init(&hw_priv->lock);

	for (i = 0; i < TOTAL_PORT_NUM; i++)
		init_waitqueue_head(&hw_priv->counter[i].counter);

	if (macaddr[0] != ':')
		get_mac_addr(hw_priv, macaddr, MAIN_PORT);

	/* Read MAC address and initialize override address if not overridden. */
	hw_read_addr(hw);

	/* Multiple device interfaces mode requires a second MAC address. */
	if (hw->dev_count > 1) {
		memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
		read_other_addr(hw);
		if (mac1addr[0] != ':')
			get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
	}

	hw_setup(hw);
	if (hw->ksz_switch)
		sw_setup(hw);
	else {
		hw_priv->wol_support = WOL_SUPPORT;
		hw_priv->wol_enable = 0;
	}

	INIT_WORK(&hw_priv->mib_read, mib_read_work);

	/* 500 ms timeout */
	ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
		mib_monitor);

	for (i = 0; i < hw->dev_count; i++) {
		dev = alloc_etherdev(sizeof(struct dev_priv));
		if (!dev)
			goto pcidev_init_reg_err;
		SET_NETDEV_DEV(dev, &pdev->dev);
		info->netdev[i] = dev;

		priv = netdev_priv(dev);
		priv->adapter = hw_priv;
		priv->id = net_device_present++;

		port = &priv->port;
		port->port_cnt = port_count;
		port->mib_port_cnt = mib_port_count;
		port->first_port = i;
		port->flow_ctrl = PHY_FLOW_CTRL;

		port->hw = hw;
		port->linked = &hw->port_info[port->first_port];

		for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
			hw->port_info[pi].port_id = pi;
			hw->port_info[pi].pdev = dev;
			hw->port_info[pi].state = media_disconnected;
		}

		dev->mem_start = (unsigned long) hw->io;
		dev->mem_end = dev->mem_start + reg_len - 1;
		dev->irq = pdev->irq;
		if (MAIN_PORT == i)
			memcpy(dev->dev_addr, hw_priv->hw.override_addr,
				ETH_ALEN);
		else {
			memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
			if (ether_addr_equal(sw->other_addr, hw->override_addr))
				dev->dev_addr[5] += port->first_port;
		}

		dev->netdev_ops = &netdev_ops;
		dev->ethtool_ops = &netdev_ethtool_ops;

		/* MTU range: 60 - 1894 */
		dev->min_mtu = ETH_ZLEN;
		dev->max_mtu = MAX_RX_BUF_SIZE -
			(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

		if (register_netdev(dev))
			goto pcidev_init_reg_err;
		port_set_power_saving(port, true);
	}

	pci_dev_get(hw_priv->pdev);
	pci_set_drvdata(pdev, info);
	return 0;

pcidev_init_reg_err:
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			netdev_free(info->netdev[i]);
			info->netdev[i] = NULL;
		}
	}

pcidev_init_mem_err:
	ksz_free_mem(hw_priv);
	kfree(hw->ksz_switch);

pcidev_init_alloc_err:
	iounmap(hw->io);

pcidev_init_io_err:
	kfree(info);

pcidev_init_dev_err:
	release_mem_region(reg_base, reg_len);

	return result;
}

static void pcidev_exit(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;

	release_mem_region(pci_resource_start(pdev, 0),
		pci_resource_len(pdev, 0));
	for (i = 0; i < hw_priv->hw.dev_count; i++) {
		if (info->netdev[i])
			netdev_free(info->netdev[i]);
	}
	if (hw_priv->hw.io)
		iounmap(hw_priv->hw.io);
	ksz_free_mem(hw_priv);
	kfree(hw_priv->hw.ksz_switch);
	pci_dev_put(hw_priv->pdev);
	kfree(info);
}

#ifdef CONFIG_PM
static int pcidev_resume(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	if (hw_priv->wol_enable)
		hw_cfg_wol_pme(hw, 0);
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];

			if (netif_running(dev)) {
				netdev_open(dev);
				netif_device_attach(dev);
			}
		}
	}
	return 0;
}

static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;

	/* Need to find a way to retrieve the device IP address. */
	static const u8 net_addr[] = { 192, 168, 1, 1 };

	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];

			if (netif_running(dev)) {
				netif_device_detach(dev);
				netdev_close(dev);
			}
		}
	}
	if (hw_priv->wol_enable) {
		hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
		hw_cfg_wol_pme(hw, 1);
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
#endif
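
/*
 * Example (illustrative): Wake-on-LAN is typically armed from userspace
 * before suspend with something like
 *
 *	ethtool -s eth0 wol g
 *
 * pcidev_suspend() then programs the PME wake-up when wol_enable is set.
 */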

static char pcidev_name[] = "ksz884xp";

static const struct pci_device_id pcidev_table[] = {
	{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pcidev_table);

static struct pci_driver pci_device_driver = {
#ifdef CONFIG_PM
	.suspend = pcidev_suspend,
	.resume = pcidev_resume,
#endif
	.name = pcidev_name,
	.id_table = pcidev_table,
	.probe = pcidev_init,
	.remove = pcidev_exit
};

module_pci_driver(pci_device_driver);

MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
MODULE_LICENSE("GPL");

module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");

module_param(macaddr, charp, 0);
module_param(mac1addr, charp, 0);
module_param(fast_aging, int, 0);
module_param(multi_dev, int, 0);
module_param(stp, int, 0);
MODULE_PARM_DESC(macaddr, "MAC address");
MODULE_PARM_DESC(mac1addr, "Second MAC address");
MODULE_PARM_DESC(fast_aging, "Fast aging");
MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
MODULE_PARM_DESC(stp, "STP support");
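
/*
 * Example (illustrative, module name assumed to be "ksz884x"): the message
 * verbosity can be combined with the other parameters above, e.g.
 *
 *	modprobe ksz884x message=31 multi_dev=1
 *
 * where "message" is the netif_msg bitmask described above (0=none, 31=all).
 */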