mvpp2.c

/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET 5
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG 0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG 0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
#define MVPP2_CLS_PORT_WAY_REG 0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG 0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
#define MVPP2_RXQ_NUM_NEW_OFFSET 16
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
#define MVPP2_RXQ_THRESH_REG 0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET 0
#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
#define MVPP2_RXQ_INDEX_REG 0x2050
#define MVPP2_TXQ_NUM_REG 0x2080
#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
#define MVPP2_TXQ_THRESH_REG 0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
#define MVPP2_TXQ_PENDING_REG 0x20a0
#define MVPP2_TXQ_PENDING_MASK 0x3fff
#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
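
/* Illustrative sketch (editorial addition, not part of the original
 * driver): splitting a value read from MVPP2_ISR_RX_TX_CAUSE_REG()
 * into its per-queue components with the masks above. The function
 * name is hypothetical.
 */
static inline bool mvpp2_example_cause_has_work(u32 cause)
{
	/* Bits 0-15: one bit per RX queue with occupied descriptors */
	u32 cause_rx = cause & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	/* Bits 16-23: one bit per TX queue with occupied descriptors */
	u32 cause_tx = cause & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	/* BIT(31) summarizes the miscellaneous (error) causes */
	return cause_rx || cause_tx || (cause & MVPP2_CAUSE_MISC_SUM_MASK);
}
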
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
#define MVPP2_BM_STOP_MASK BIT(1)
#define MVPP2_BM_STATE_MASK BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS 8
#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
					MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS 16
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
					MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
#define MVPP2_BM_MC_RLS_REG 0x64c4
#define MVPP2_BM_MC_ID_MASK 0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG 0x8800
#define MVPP2_TX_PORT_FLUSH_REG 0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE 0x24
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
					0x400 + (port) * 0x400)
#define MVPP2_MIB_LATE_COLLISION 0x7c
#define MVPP2_ISR_SUM_MASK_REG 0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT 6
#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS 7
#define MVPP2_GMAC_CTRL_2_REG 0x8
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
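
/* Illustrative sketch (editorial addition, not part of the original
 * driver): MVPP2_QUEUE_NEXT_DESC() advances a ring index and wraps it
 * to 0 after last_desc, so a descriptor queue behaves as a circular
 * buffer. The helper below walks one full turn of any object exposing
 * a last_desc field and returns the ring size; its name is
 * hypothetical.
 */
static inline int mvpp2_example_ring_walk(int last_desc)
{
	struct { int last_desc; } ring = { last_desc }, *q = &ring;
	int index = 0;
	int steps = 0;

	/* Visit every slot once; after last_desc the index wraps to 0 */
	do {
		index = MVPP2_QUEUE_NEXT_DESC(q, index);
		steps++;
	} while (index != 0);

	return steps;	/* == last_desc + 1, the ring size */
}
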
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100

/* The two bytes Marvell header. It either contains a special value
 * used by Marvell switches when a specific hardware mode is enabled
 * (not supported by this driver), or is filled with zeroes
 * automatically on the RX side. Since those two bytes sit at the
 * front of the Ethernet header, they automatically leave the IP
 * header aligned on a 4-byte boundary: the hardware skips those two
 * bytes on its own.
 */
#define MVPP2_MH_SIZE 2
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4
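
/* Worked example (editorial note, not in the original source): a
 * plain Ethernet header is ETH_HLEN = 14 bytes, so without the
 * Marvell header the IP header would start on a 2-byte boundary.
 * With it, L3 starts at MVPP2_MH_SIZE + ETH_HLEN = 2 + 14 = 16 bytes
 * into the buffer, i.e. on a 4-byte boundary, which is the alignment
 * property the comment above relies on.
 */
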
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE 9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000

#define MVPP2_TX_MTU_MAX 0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT 16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ 8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4

/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD 1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE 256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
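
/* Worked example (editorial note, not in the original source),
 * assuming a 1500-byte MTU and a 64-byte cache line:
 *
 *	pkt_size   = MVPP2_RX_PKT_SIZE(1500)
 *	           = ALIGN(1500 + 2 + 4 + 14 + 4, 64)
 *	           = ALIGN(1524, 64) = 1536
 *	buf_size   = MVPP2_RX_BUF_SIZE(1536) = 1536 + NET_SKB_PAD
 *	total_size = MVPP2_RX_TOTAL_SIZE(buf_size), adding room for the
 *	             skb_shared_info placed at the end of the buffer.
 *
 * MVPP2_RX_MAX_PKT_SIZE() inverts this: given a total buffer size, it
 * recovers the largest packet that still fits alongside the headroom
 * and the shared info.
 */
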
#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16

/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
#define MVPP2_PRS_SRAM_WORDS 4
#define MVPP2_PRS_FLOW_ID_SIZE 64
#define MVPP2_PRS_FLOW_ID_MASK 0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
#define MVPP2_PRS_IPV4_HEAD 0x40
#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
#define MVPP2_PRS_IPV4_IHL 0x5
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
#define MVPP2_PRS_IPV6_HOP_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
#define MVPP2_PRS_DBL_VLANS_MAX 100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS 8
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE 16
#define MVPP2_PRS_TCAM_PORT_BYTE 17
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5
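
/* Illustrative mapping (editorial note, not in the original source):
 * each 32-bit TCAM word carries two header-data bytes in its low half
 * and their two enable (mask) bytes in its high half, so logical
 * header byte N lands in the byte[] view of the TCAM entry at:
 *
 *	MVPP2_PRS_TCAM_DATA_BYTE(0) == 0,  _DATA_BYTE_EN(0) == 2
 *	MVPP2_PRS_TCAM_DATA_BYTE(1) == 1,  _DATA_BYTE_EN(1) == 3
 *	MVPP2_PRS_TCAM_DATA_BYTE(2) == 4,  _DATA_BYTE_EN(2) == 6
 *	MVPP2_PRS_TCAM_DATA_BYTE(3) == 5,  _DATA_BYTE_EN(3) == 7
 *
 * i.e. data/enable byte pairs interleave word by word within
 * union mvpp2_prs_tcam_entry, defined later in this file.
 */
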
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS 0
#define MVPP2_PRS_SRAM_RI_WORD 0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
#define MVPP2_PRS_SRAM_AI_OFFS 90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
#define MVPP2_PRS_SRAM_AI_MASK 0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
#define MVPP2_PRS_SRAM_LU_GEN_BIT 111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK 0xc
#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
#define MVPP2_PRS_RI_L4_TCP BIT(22)
#define MVPP2_PRS_RI_L4_UDP BIT(23)
#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED true
#define MVPP2_PRS_UNTAGGED false
#define MVPP2_PRS_EDSA true
#define MVPP2_PRS_DSA false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64

/* BM constants */
#define MVPP2_BM_POOLS_NUM 8
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL 3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24

/* BM short pool packet size
 * This value ensures that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
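
/* Worked example (editorial note, not in the original source):
 * MVPP2_RX_MAX_PKT_SIZE(512) expands to
 * 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE, i.e. the packet size is
 * chosen so that the packet data, the skb headroom and the trailing
 * skb_shared_info together occupy exactly 512 bytes per short-pool
 * buffer.
 */
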
enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;
};

struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};

struct mvpp2_port {
	u8 id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	int pkt_size;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */
#define MVPP2_TXD_L3_OFF_SHIFT 0
#define MVPP2_TXD_IP_HLEN_SHIFT 8
#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
#define MVPP2_TXD_PADDING_DISABLE BIT(23)
#define MVPP2_TXD_L4_UDP BIT(24)
#define MVPP2_TXD_L3_IP6 BIT(26)
#define MVPP2_TXD_L_DESC BIT(28)
#define MVPP2_TXD_F_DESC BIT(29)

#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC BIT(21)
#define MVPP2_RXD_L4_CSUM_OK BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
#define MVPP2_RXD_L4_TCP BIT(25)
#define MVPP2_RXD_L4_UDP BIT(26)
#define MVPP2_RXD_L3_IP4 BIT(28)
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)

struct mvpp2_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning */
	u8  phys_txq;		/* destination queue ID */
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* physical addr of transmitted buffer */
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use) */
};

struct mvpp2_rx_desc {
	u32 status;		/* info about received packet */
	u16 reserved1;		/* parser_info (for future use, PnC) */
	u16 data_size;		/* size of received packet in bytes */
	u32 buf_phys_addr;	/* physical address of the buffer */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON) */
	u16 reserved3;		/* csum_l4 (for future use, PnC) */
	u8  reserved4;		/* bm_qset (for future use, BM) */
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC) */
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};
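
/* Illustrative sketch (editorial addition, not part of the original
 * driver): checking an RX descriptor's status word with the
 * MVPP2_RXD_* flags above. A simplified view of the usual checks;
 * the helper name is hypothetical.
 */
static inline bool mvpp2_example_rx_csum_ok(const struct mvpp2_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	/* Any RX error sets the summary bit; the specific cause sits
	 * in MVPP2_RXD_ERR_CODE_MASK (CRC / overrun / resource).
	 */
	if (status & MVPP2_RXD_ERR_SUMMARY)
		return false;

	/* Hardware validates the L4 checksum only for TCP and UDP */
	return (status & (MVPP2_RXD_L4_TCP | MVPP2_RXD_L4_UDP)) &&
	       (status & MVPP2_RXD_L4_CSUM_OK);
}
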
  688. struct mvpp2_txq_pcpu_buf {
  689. /* Transmitted SKB */
  690. struct sk_buff *skb;
  691. /* Physical address of transmitted buffer */
  692. dma_addr_t phys;
  693. /* Size transmitted */
  694. size_t size;
  695. };
  696. /* Per-CPU Tx queue control */
  697. struct mvpp2_txq_pcpu {
  698. int cpu;
  699. /* Number of Tx DMA descriptors in the descriptor ring */
  700. int size;
  701. /* Number of currently used Tx DMA descriptor in the
  702. * descriptor ring
  703. */
  704. int count;
  705. /* Number of Tx DMA descriptors reserved for each CPU */
  706. int reserved_num;
707. /* Info about transmitted buffers */
  708. struct mvpp2_txq_pcpu_buf *buffs;
  709. /* Index of last TX DMA descriptor that was inserted */
  710. int txq_put_index;
  711. /* Index of the TX DMA descriptor to be cleaned up */
  712. int txq_get_index;
  713. };
  714. struct mvpp2_tx_queue {
  715. /* Physical number of this Tx queue */
  716. u8 id;
  717. /* Logical number of this Tx queue */
  718. u8 log_id;
  719. /* Number of Tx DMA descriptors in the descriptor ring */
  720. int size;
721. /* Number of currently used Tx DMA descriptors in the descriptor ring */
  722. int count;
  723. /* Per-CPU control of physical Tx queues */
  724. struct mvpp2_txq_pcpu __percpu *pcpu;
  725. /* Array of transmitted skb */
  726. struct sk_buff **tx_skb;
  727. u32 done_pkts_coal;
728. /* Virtual address of the Tx DMA descriptors array */
  729. struct mvpp2_tx_desc *descs;
  730. /* DMA address of the Tx DMA descriptors array */
  731. dma_addr_t descs_phys;
  732. /* Index of the last Tx DMA descriptor */
  733. int last_desc;
  734. /* Index of the next Tx DMA descriptor to process */
  735. int next_desc_to_proc;
  736. };
  737. struct mvpp2_rx_queue {
  738. /* RX queue number, in the range 0-31 for physical RXQs */
  739. u8 id;
740. /* Number of RX descriptors in the RX descriptor ring */
  741. int size;
  742. u32 pkts_coal;
  743. u32 time_coal;
  744. /* Virtual address of the RX DMA descriptors array */
  745. struct mvpp2_rx_desc *descs;
  746. /* DMA address of the RX DMA descriptors array */
  747. dma_addr_t descs_phys;
  748. /* Index of the last RX DMA descriptor */
  749. int last_desc;
  750. /* Index of the next RX DMA descriptor to process */
  751. int next_desc_to_proc;
  752. /* ID of port to which physical RXQ is mapped */
  753. int port;
754. /* Port's logical RXQ number to which physical RXQ is mapped */
  755. int logic_rxq;
  756. };
  757. union mvpp2_prs_tcam_entry {
  758. u32 word[MVPP2_PRS_TCAM_WORDS];
  759. u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
  760. };
  761. union mvpp2_prs_sram_entry {
  762. u32 word[MVPP2_PRS_SRAM_WORDS];
  763. u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
  764. };
  765. struct mvpp2_prs_entry {
  766. u32 index;
  767. union mvpp2_prs_tcam_entry tcam;
  768. union mvpp2_prs_sram_entry sram;
  769. };
  770. struct mvpp2_prs_shadow {
  771. bool valid;
  772. bool finish;
  773. /* Lookup ID */
  774. int lu;
  775. /* User defined offset */
  776. int udf;
  777. /* Result info */
  778. u32 ri;
  779. u32 ri_mask;
  780. };
  781. struct mvpp2_cls_flow_entry {
  782. u32 index;
  783. u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
  784. };
  785. struct mvpp2_cls_lookup_entry {
  786. u32 lkpid;
  787. u32 way;
  788. u32 data;
  789. };
  790. struct mvpp2_bm_pool {
  791. /* Pool number in the range 0-7 */
  792. int id;
  793. enum mvpp2_bm_type type;
  794. /* Buffer Pointers Pool External (BPPE) size */
  795. int size;
  796. /* Number of buffers for this pool */
  797. int buf_num;
  798. /* Pool buffer size */
  799. int buf_size;
  800. /* Packet size */
  801. int pkt_size;
  802. /* BPPE virtual base address */
  803. u32 *virt_addr;
  804. /* BPPE physical base address */
  805. dma_addr_t phys_addr;
  806. /* Ports using BM pool */
  807. u32 port_map;
  808. /* Occupied buffers indicator */
  809. atomic_t in_use;
  810. int in_use_thresh;
  811. };
  812. struct mvpp2_buff_hdr {
  813. u32 next_buff_phys_addr;
  814. u32 next_buff_virt_addr;
  815. u16 byte_count;
  816. u16 info;
  817. u8 reserved1; /* bm_qset (for future use, BM) */
  818. };
  819. /* Buffer header info bits */
  820. #define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
  821. #define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
  822. #define MVPP2_B_HDR_INFO_LAST_OFFS 12
  823. #define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
  824. #define MVPP2_B_HDR_INFO_IS_LAST(info) \
  825. ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
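/* Worked example (illustrative): for info == 0x1234,
* MVPP2_B_HDR_INFO_MC_ID(info) extracts 0x234 and
* MVPP2_B_HDR_INFO_IS_LAST(info) yields 1, since bit 12 is set.
*/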
826. /* Static declarations */
  827. /* Number of RXQs used by single port */
  828. static int rxq_number = MVPP2_DEFAULT_RXQ;
  829. /* Number of TXQs used by single port */
  830. static int txq_number = MVPP2_MAX_TXQ;
  831. #define MVPP2_DRIVER_NAME "mvpp2"
  832. #define MVPP2_DRIVER_VERSION "1.0"
  833. /* Utility/helper methods */
  834. static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
  835. {
  836. writel(data, priv->base + offset);
  837. }
  838. static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
  839. {
  840. return readl(priv->base + offset);
  841. }
  842. static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
  843. {
  844. txq_pcpu->txq_get_index++;
  845. if (txq_pcpu->txq_get_index == txq_pcpu->size)
  846. txq_pcpu->txq_get_index = 0;
  847. }
  848. static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
  849. struct sk_buff *skb,
  850. struct mvpp2_tx_desc *tx_desc)
  851. {
  852. struct mvpp2_txq_pcpu_buf *tx_buf =
  853. txq_pcpu->buffs + txq_pcpu->txq_put_index;
  854. tx_buf->skb = skb;
  855. tx_buf->size = tx_desc->data_size;
  856. tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
  857. txq_pcpu->txq_put_index++;
  858. if (txq_pcpu->txq_put_index == txq_pcpu->size)
  859. txq_pcpu->txq_put_index = 0;
  860. }
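/* Illustrative sketch, not part of the original driver: txq_put_index
* advances as descriptors are queued for transmit and txq_get_index as
* completions are cleaned, so buffs[] behaves as a FIFO between the two.
* The helper name is hypothetical.
*/
static inline struct mvpp2_txq_pcpu_buf *
mvpp2_txq_next_buf_to_clean_sketch(struct mvpp2_txq_pcpu *txq_pcpu)
{
/* Entry that the next mvpp2_txq_inc_get() call will step past */
return txq_pcpu->buffs + txq_pcpu->txq_get_index;
}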
  861. /* Get number of physical egress port */
  862. static inline int mvpp2_egress_port(struct mvpp2_port *port)
  863. {
  864. return MVPP2_MAX_TCONT + port->id;
  865. }
  866. /* Get number of physical TXQ */
  867. static inline int mvpp2_txq_phys(int port, int txq)
  868. {
  869. return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
  870. }
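/* Worked example (assuming the usual MVPP2_MAX_TCONT == 16 and
* MVPP2_MAX_TXQ == 8 values): for port 1, txq 2, the physical TXQ is
* (16 + 1) * 8 + 2 == 138, and the physical egress port is 16 + 1 == 17.
*/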
  871. /* Parser configuration routines */
  872. /* Update parser tcam and sram hw entries */
  873. static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
  874. {
  875. int i;
  876. if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  877. return -EINVAL;
  878. /* Clear entry invalidation bit */
  879. pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
  880. /* Write tcam index - indirect access */
  881. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  882. for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  883. mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
  884. /* Write sram index - indirect access */
  885. mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  886. for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  887. mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
  888. return 0;
  889. }
  890. /* Read tcam entry from hw */
  891. static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
  892. {
  893. int i;
  894. if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  895. return -EINVAL;
  896. /* Write tcam index - indirect access */
  897. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  898. pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
  899. MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
  900. if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
  901. return MVPP2_PRS_TCAM_ENTRY_INVALID;
  902. for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  903. pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
  904. /* Write sram index - indirect access */
  905. mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  906. for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  907. pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
  908. return 0;
  909. }
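/* Usage sketch, not part of the original driver: since the TCAM is only
* reachable through the indirect index/data registers, probing whether a
* slot holds a valid entry is just a read that checks the return value.
* The helper name is hypothetical.
*/
static inline bool mvpp2_prs_entry_valid_sketch(struct mvpp2 *priv, int index)
{
struct mvpp2_prs_entry pe;
memset(&pe, 0, sizeof(pe));
pe.index = index;
/* Returns MVPP2_PRS_TCAM_ENTRY_INVALID when the inv bit is set */
return mvpp2_prs_hw_read(priv, &pe) == 0;
}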
  910. /* Invalidate tcam hw entry */
  911. static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
  912. {
  913. /* Write index - indirect access */
  914. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
  915. mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
  916. MVPP2_PRS_TCAM_INV_MASK);
  917. }
  918. /* Enable shadow table entry and set its lookup ID */
  919. static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
  920. {
  921. priv->prs_shadow[index].valid = true;
  922. priv->prs_shadow[index].lu = lu;
  923. }
  924. /* Update ri fields in shadow table entry */
  925. static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
  926. unsigned int ri, unsigned int ri_mask)
  927. {
  928. priv->prs_shadow[index].ri_mask = ri_mask;
  929. priv->prs_shadow[index].ri = ri;
  930. }
  931. /* Update lookup field in tcam sw entry */
  932. static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
  933. {
  934. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
  935. pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
  936. pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
  937. }
  938. /* Update mask for single port in tcam sw entry */
  939. static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
  940. unsigned int port, bool add)
  941. {
  942. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
  943. if (add)
  944. pe->tcam.byte[enable_off] &= ~(1 << port);
  945. else
  946. pe->tcam.byte[enable_off] |= 1 << port;
  947. }
  948. /* Update port map in tcam sw entry */
  949. static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
  950. unsigned int ports)
  951. {
  952. unsigned char port_mask = MVPP2_PRS_PORT_MASK;
  953. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
  954. pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
  955. pe->tcam.byte[enable_off] &= ~port_mask;
  956. pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
  957. }
  958. /* Obtain port map from tcam sw entry */
  959. static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
  960. {
  961. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
  962. return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
  963. }
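/* Worked example (illustrative): the enable byte stores the port map
* inverted, so mvpp2_prs_tcam_port_map_set(pe, 0) writes 0xff and masks
* every port, while passing MVPP2_PRS_PORT_MASK unmasks them all.
* mvpp2_prs_tcam_port_map_get() undoes the inversion, so a hypothetical
* per-port test reduces to:
*/
static inline bool mvpp2_prs_tcam_port_enabled_sketch(struct mvpp2_prs_entry *pe,
unsigned int port)
{
return mvpp2_prs_tcam_port_map_get(pe) & BIT(port);
}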
  964. /* Set byte of data and its enable bits in tcam sw entry */
  965. static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
  966. unsigned int offs, unsigned char byte,
  967. unsigned char enable)
  968. {
  969. pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
  970. pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
  971. }
  972. /* Get byte of data and its enable bits from tcam sw entry */
  973. static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
  974. unsigned int offs, unsigned char *byte,
  975. unsigned char *enable)
  976. {
  977. *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
  978. *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
  979. }
  980. /* Compare tcam data bytes with a pattern */
  981. static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
  982. u16 data)
  983. {
  984. int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
  985. u16 tcam_data;
986. tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
  987. if (tcam_data != data)
  988. return false;
  989. return true;
  990. }
  991. /* Update ai bits in tcam sw entry */
  992. static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
  993. unsigned int bits, unsigned int enable)
  994. {
  995. int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
  996. for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
  997. if (!(enable & BIT(i)))
  998. continue;
  999. if (bits & BIT(i))
  1000. pe->tcam.byte[ai_idx] |= 1 << i;
  1001. else
  1002. pe->tcam.byte[ai_idx] &= ~(1 << i);
  1003. }
  1004. pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
  1005. }
  1006. /* Get ai bits from tcam sw entry */
  1007. static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
  1008. {
  1009. return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
  1010. }
  1011. /* Set ethertype in tcam sw entry */
  1012. static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
  1013. unsigned short ethertype)
  1014. {
  1015. mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
  1016. mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
  1017. }
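/* Worked example (illustrative): ethertypes are matched in network byte
* order, so mvpp2_prs_match_etype(pe, 0, ETH_P_IP) programs data byte 0
* to 0x08 and byte 1 to 0x00, each with a full 0xff enable mask.
*/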
  1018. /* Set bits in sram sw entry */
  1019. static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
  1020. int val)
  1021. {
  1022. pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
  1023. }
  1024. /* Clear bits in sram sw entry */
  1025. static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
  1026. int val)
  1027. {
  1028. pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
  1029. }
  1030. /* Update ri bits in sram sw entry */
  1031. static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
  1032. unsigned int bits, unsigned int mask)
  1033. {
  1034. unsigned int i;
  1035. for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
  1036. int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
  1037. if (!(mask & BIT(i)))
  1038. continue;
  1039. if (bits & BIT(i))
  1040. mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
  1041. else
  1042. mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
  1043. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
  1044. }
  1045. }
  1046. /* Obtain ri bits from sram sw entry */
  1047. static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
  1048. {
  1049. return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
  1050. }
  1051. /* Update ai bits in sram sw entry */
  1052. static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
  1053. unsigned int bits, unsigned int mask)
  1054. {
  1055. unsigned int i;
  1056. int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
  1057. for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
  1058. if (!(mask & BIT(i)))
  1059. continue;
  1060. if (bits & BIT(i))
  1061. mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
  1062. else
  1063. mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
  1064. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
  1065. }
  1066. }
  1067. /* Read ai bits from sram sw entry */
  1068. static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
  1069. {
  1070. u8 bits;
  1071. int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
  1072. int ai_en_off = ai_off + 1;
  1073. int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
  1074. bits = (pe->sram.byte[ai_off] >> ai_shift) |
  1075. (pe->sram.byte[ai_en_off] << (8 - ai_shift));
  1076. return bits;
  1077. }
1078. /* In sram sw entry set lookup ID field of the tcam key to be used in the next
1079. * lookup iteration
1080. */
  1081. static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
  1082. unsigned int lu)
  1083. {
  1084. int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
  1085. mvpp2_prs_sram_bits_clear(pe, sram_next_off,
  1086. MVPP2_PRS_SRAM_NEXT_LU_MASK);
  1087. mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
  1088. }
1089. /* In the sram sw entry set the sign and value of the next lookup offset
1090. * and of the offset value generated to the classifier
1091. */
  1092. static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
  1093. unsigned int op)
  1094. {
  1095. /* Set sign */
  1096. if (shift < 0) {
  1097. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
  1098. shift = 0 - shift;
  1099. } else {
  1100. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
  1101. }
  1102. /* Set value */
  1103. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
  1104. (unsigned char)shift;
  1105. /* Reset and set operation */
  1106. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
  1107. MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
  1108. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
  1109. /* Set base offset as current */
  1110. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
  1111. }
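/* Worked example (illustrative): mvpp2_prs_sram_shift_set(pe, 4,
* MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD) starts the next lookup 4 bytes
* further into the packet (one VLAN tag), while a negative shift such
* as the -18 used for IPv6 multicast below moves the lookup window
* back toward data already parsed.
*/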
  1112. /* In the sram sw entry set sign and value of the user defined offset
  1113. * generated to the classifier
  1114. */
  1115. static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
  1116. unsigned int type, int offset,
  1117. unsigned int op)
  1118. {
  1119. /* Set sign */
  1120. if (offset < 0) {
  1121. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
  1122. offset = 0 - offset;
  1123. } else {
  1124. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
  1125. }
  1126. /* Set value */
  1127. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
  1128. MVPP2_PRS_SRAM_UDF_MASK);
  1129. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
  1130. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
  1131. MVPP2_PRS_SRAM_UDF_BITS)] &=
  1132. ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
  1133. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
  1134. MVPP2_PRS_SRAM_UDF_BITS)] |=
  1135. (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
  1136. /* Set offset type */
  1137. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
  1138. MVPP2_PRS_SRAM_UDF_TYPE_MASK);
  1139. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
  1140. /* Set offset operation */
  1141. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
  1142. MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
  1143. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
  1144. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
  1145. MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
  1146. ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
  1147. (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
  1148. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
  1149. MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
  1150. (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
  1151. /* Set base offset as current */
  1152. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
  1153. }
  1154. /* Find parser flow entry */
  1155. static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
  1156. {
  1157. struct mvpp2_prs_entry *pe;
  1158. int tid;
  1159. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  1160. if (!pe)
  1161. return NULL;
  1162. mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1163. /* Go through all entries with MVPP2_PRS_LU_FLOWS */
  1164. for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
  1165. u8 bits;
  1166. if (!priv->prs_shadow[tid].valid ||
  1167. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
  1168. continue;
  1169. pe->index = tid;
  1170. mvpp2_prs_hw_read(priv, pe);
  1171. bits = mvpp2_prs_sram_ai_get(pe);
1172. /* SRAM stores the classification lookup ID in AI bits [5:0] */
  1173. if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
  1174. return pe;
  1175. }
  1176. kfree(pe);
  1177. return NULL;
  1178. }
  1179. /* Return first free tcam index, seeking from start to end */
  1180. static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
  1181. unsigned char end)
  1182. {
  1183. int tid;
  1184. if (start > end)
  1185. swap(start, end);
  1186. if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
  1187. end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
  1188. for (tid = start; tid <= end; tid++) {
  1189. if (!priv->prs_shadow[tid].valid)
  1190. return tid;
  1191. }
  1192. return -EINVAL;
  1193. }
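/* Usage sketch (illustrative): the range is normalized internally, so
* callers may pass the bounds in either order; both calls below scan
* the same range upward:
*
* mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
* MVPP2_PE_LAST_FREE_TID);
* mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
* MVPP2_PE_FIRST_FREE_TID);
*/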
1194. /* Enable/disable dropping of all MAC DAs */
  1195. static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
  1196. {
  1197. struct mvpp2_prs_entry pe;
  1198. if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
  1199. /* Entry exist - update port only */
  1200. pe.index = MVPP2_PE_DROP_ALL;
  1201. mvpp2_prs_hw_read(priv, &pe);
  1202. } else {
  1203. /* Entry doesn't exist - create new */
  1204. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1205. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
  1206. pe.index = MVPP2_PE_DROP_ALL;
  1207. /* Non-promiscuous mode for all ports - DROP unknown packets */
  1208. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
  1209. MVPP2_PRS_RI_DROP_MASK);
  1210. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1211. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1212. /* Update shadow table */
  1213. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  1214. /* Mask all ports */
  1215. mvpp2_prs_tcam_port_map_set(&pe, 0);
  1216. }
  1217. /* Update port mask */
  1218. mvpp2_prs_tcam_port_set(&pe, port, add);
  1219. mvpp2_prs_hw_write(priv, &pe);
  1220. }
  1221. /* Set port to promiscuous mode */
  1222. static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
  1223. {
  1224. struct mvpp2_prs_entry pe;
  1225. /* Promiscuous mode - Accept unknown packets */
  1226. if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
  1227. /* Entry exist - update port only */
  1228. pe.index = MVPP2_PE_MAC_PROMISCUOUS;
  1229. mvpp2_prs_hw_read(priv, &pe);
  1230. } else {
  1231. /* Entry doesn't exist - create new */
  1232. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1233. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
  1234. pe.index = MVPP2_PE_MAC_PROMISCUOUS;
  1235. /* Continue - set next lookup */
  1236. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
  1237. /* Set result info bits */
  1238. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
  1239. MVPP2_PRS_RI_L2_CAST_MASK);
  1240. /* Shift to ethertype */
  1241. mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
  1242. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1243. /* Mask all ports */
  1244. mvpp2_prs_tcam_port_map_set(&pe, 0);
  1245. /* Update shadow table */
  1246. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  1247. }
  1248. /* Update port mask */
  1249. mvpp2_prs_tcam_port_set(&pe, port, add);
  1250. mvpp2_prs_hw_write(priv, &pe);
  1251. }
  1252. /* Accept multicast */
  1253. static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
  1254. bool add)
  1255. {
  1256. struct mvpp2_prs_entry pe;
  1257. unsigned char da_mc;
  1258. /* Ethernet multicast address first byte is
  1259. * 0x01 for IPv4 and 0x33 for IPv6
  1260. */
  1261. da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
  1262. if (priv->prs_shadow[index].valid) {
  1263. /* Entry exist - update port only */
  1264. pe.index = index;
  1265. mvpp2_prs_hw_read(priv, &pe);
  1266. } else {
  1267. /* Entry doesn't exist - create new */
  1268. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1269. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
  1270. pe.index = index;
  1271. /* Continue - set next lookup */
  1272. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
  1273. /* Set result info bits */
  1274. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
  1275. MVPP2_PRS_RI_L2_CAST_MASK);
  1276. /* Update tcam entry data first byte */
  1277. mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
  1278. /* Shift to ethertype */
  1279. mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
  1280. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1281. /* Mask all ports */
  1282. mvpp2_prs_tcam_port_map_set(&pe, 0);
  1283. /* Update shadow table */
  1284. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  1285. }
  1286. /* Update port mask */
  1287. mvpp2_prs_tcam_port_set(&pe, port, add);
  1288. mvpp2_prs_hw_write(priv, &pe);
  1289. }
  1290. /* Set entry for dsa packets */
  1291. static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
  1292. bool tagged, bool extend)
  1293. {
  1294. struct mvpp2_prs_entry pe;
  1295. int tid, shift;
  1296. if (extend) {
  1297. tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
  1298. shift = 8;
  1299. } else {
  1300. tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
  1301. shift = 4;
  1302. }
  1303. if (priv->prs_shadow[tid].valid) {
  1304. /* Entry exist - update port only */
  1305. pe.index = tid;
  1306. mvpp2_prs_hw_read(priv, &pe);
  1307. } else {
  1308. /* Entry doesn't exist - create new */
  1309. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1310. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
  1311. pe.index = tid;
1312. /* Shift 4 bytes for a DSA tag or 8 bytes for an EDSA tag */
  1313. mvpp2_prs_sram_shift_set(&pe, shift,
  1314. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1315. /* Update shadow table */
  1316. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
  1317. if (tagged) {
  1318. /* Set tagged bit in DSA tag */
  1319. mvpp2_prs_tcam_data_byte_set(&pe, 0,
  1320. MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
  1321. MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
  1322. /* Clear all ai bits for next iteration */
  1323. mvpp2_prs_sram_ai_update(&pe, 0,
  1324. MVPP2_PRS_SRAM_AI_MASK);
  1325. /* If packet is tagged continue check vlans */
  1326. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  1327. } else {
  1328. /* Set result info bits to 'no vlans' */
  1329. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
  1330. MVPP2_PRS_RI_VLAN_MASK);
  1331. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  1332. }
  1333. /* Mask all ports */
  1334. mvpp2_prs_tcam_port_map_set(&pe, 0);
  1335. }
  1336. /* Update port mask */
  1337. mvpp2_prs_tcam_port_set(&pe, port, add);
  1338. mvpp2_prs_hw_write(priv, &pe);
  1339. }
  1340. /* Set entry for dsa ethertype */
  1341. static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
  1342. bool add, bool tagged, bool extend)
  1343. {
  1344. struct mvpp2_prs_entry pe;
  1345. int tid, shift, port_mask;
  1346. if (extend) {
  1347. tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
  1348. MVPP2_PE_ETYPE_EDSA_UNTAGGED;
  1349. port_mask = 0;
  1350. shift = 8;
  1351. } else {
  1352. tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
  1353. MVPP2_PE_ETYPE_DSA_UNTAGGED;
  1354. port_mask = MVPP2_PRS_PORT_MASK;
  1355. shift = 4;
  1356. }
  1357. if (priv->prs_shadow[tid].valid) {
  1358. /* Entry exist - update port only */
  1359. pe.index = tid;
  1360. mvpp2_prs_hw_read(priv, &pe);
  1361. } else {
  1362. /* Entry doesn't exist - create new */
  1363. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1364. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
  1365. pe.index = tid;
  1366. /* Set ethertype */
  1367. mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
  1368. mvpp2_prs_match_etype(&pe, 2, 0);
  1369. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
  1370. MVPP2_PRS_RI_DSA_MASK);
1371. /* Shift past ethertype + 2 reserved bytes + tag */
  1372. mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
  1373. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1374. /* Update shadow table */
  1375. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
  1376. if (tagged) {
  1377. /* Set tagged bit in DSA tag */
  1378. mvpp2_prs_tcam_data_byte_set(&pe,
  1379. MVPP2_ETH_TYPE_LEN + 2 + 3,
  1380. MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
  1381. MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
  1382. /* Clear all ai bits for next iteration */
  1383. mvpp2_prs_sram_ai_update(&pe, 0,
  1384. MVPP2_PRS_SRAM_AI_MASK);
  1385. /* If packet is tagged continue check vlans */
  1386. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  1387. } else {
  1388. /* Set result info bits to 'no vlans' */
  1389. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
  1390. MVPP2_PRS_RI_VLAN_MASK);
  1391. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  1392. }
  1393. /* Mask/unmask all ports, depending on dsa type */
  1394. mvpp2_prs_tcam_port_map_set(&pe, port_mask);
  1395. }
  1396. /* Update port mask */
  1397. mvpp2_prs_tcam_port_set(&pe, port, add);
  1398. mvpp2_prs_hw_write(priv, &pe);
  1399. }
  1400. /* Search for existing single/triple vlan entry */
  1401. static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
  1402. unsigned short tpid, int ai)
  1403. {
  1404. struct mvpp2_prs_entry *pe;
  1405. int tid;
  1406. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  1407. if (!pe)
  1408. return NULL;
  1409. mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1410. /* Go through all entries with MVPP2_PRS_LU_VLAN */
  1411. for (tid = MVPP2_PE_FIRST_FREE_TID;
  1412. tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
  1413. unsigned int ri_bits, ai_bits;
  1414. bool match;
  1415. if (!priv->prs_shadow[tid].valid ||
  1416. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
  1417. continue;
  1418. pe->index = tid;
  1419. mvpp2_prs_hw_read(priv, pe);
  1420. match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
  1421. if (!match)
  1422. continue;
  1423. /* Get vlan type */
  1424. ri_bits = mvpp2_prs_sram_ri_get(pe);
  1425. ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
  1426. /* Get current ai value from tcam */
  1427. ai_bits = mvpp2_prs_tcam_ai_get(pe);
  1428. /* Clear double vlan bit */
  1429. ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
  1430. if (ai != ai_bits)
  1431. continue;
  1432. if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
  1433. ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
  1434. return pe;
  1435. }
  1436. kfree(pe);
  1437. return NULL;
  1438. }
  1439. /* Add/update single/triple vlan entry */
  1440. static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
  1441. unsigned int port_map)
  1442. {
  1443. struct mvpp2_prs_entry *pe;
  1444. int tid_aux, tid;
  1445. int ret = 0;
  1446. pe = mvpp2_prs_vlan_find(priv, tpid, ai);
  1447. if (!pe) {
  1448. /* Create new tcam entry */
  1449. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
  1450. MVPP2_PE_FIRST_FREE_TID);
  1451. if (tid < 0)
  1452. return tid;
  1453. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  1454. if (!pe)
  1455. return -ENOMEM;
  1456. /* Get last double vlan tid */
  1457. for (tid_aux = MVPP2_PE_LAST_FREE_TID;
  1458. tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
  1459. unsigned int ri_bits;
  1460. if (!priv->prs_shadow[tid_aux].valid ||
  1461. priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
  1462. continue;
  1463. pe->index = tid_aux;
  1464. mvpp2_prs_hw_read(priv, pe);
  1465. ri_bits = mvpp2_prs_sram_ri_get(pe);
  1466. if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
  1467. MVPP2_PRS_RI_VLAN_DOUBLE)
  1468. break;
  1469. }
  1470. if (tid <= tid_aux) {
  1471. ret = -EINVAL;
  1472. goto error;
  1473. }
1474. memset(pe, 0, sizeof(struct mvpp2_prs_entry));
  1475. mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
  1476. pe->index = tid;
  1477. mvpp2_prs_match_etype(pe, 0, tpid);
  1478. mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
  1479. /* Shift 4 bytes - skip 1 vlan tag */
  1480. mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
  1481. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1482. /* Clear all ai bits for next iteration */
  1483. mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
  1484. if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
  1485. mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
  1486. MVPP2_PRS_RI_VLAN_MASK);
  1487. } else {
  1488. ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
  1489. mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
  1490. MVPP2_PRS_RI_VLAN_MASK);
  1491. }
  1492. mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
  1493. mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
  1494. }
  1495. /* Update ports' mask */
  1496. mvpp2_prs_tcam_port_map_set(pe, port_map);
  1497. mvpp2_prs_hw_write(priv, pe);
  1498. error:
  1499. kfree(pe);
  1500. return ret;
  1501. }
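/* Note (illustrative): the tid <= tid_aux check above keeps every
* single/triple VLAN entry at a higher TCAM index than all double VLAN
* entries. Assuming lower indices match with higher priority, a QinQ
* packet therefore hits its double VLAN entry before a bare 0x8100 one.
*/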
  1502. /* Get first free double vlan ai number */
  1503. static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
  1504. {
  1505. int i;
  1506. for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
  1507. if (!priv->prs_double_vlans[i])
  1508. return i;
  1509. }
  1510. return -EINVAL;
  1511. }
  1512. /* Search for existing double vlan entry */
  1513. static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
  1514. unsigned short tpid1,
  1515. unsigned short tpid2)
  1516. {
  1517. struct mvpp2_prs_entry *pe;
  1518. int tid;
  1519. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  1520. if (!pe)
  1521. return NULL;
  1522. mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1523. /* Go through all entries with MVPP2_PRS_LU_VLAN */
  1524. for (tid = MVPP2_PE_FIRST_FREE_TID;
  1525. tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
  1526. unsigned int ri_mask;
  1527. bool match;
  1528. if (!priv->prs_shadow[tid].valid ||
  1529. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
  1530. continue;
  1531. pe->index = tid;
  1532. mvpp2_prs_hw_read(priv, pe);
1533. match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
1534. mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
  1535. if (!match)
  1536. continue;
  1537. ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
  1538. if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
  1539. return pe;
  1540. }
  1541. kfree(pe);
  1542. return NULL;
  1543. }
  1544. /* Add or update double vlan entry */
  1545. static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
  1546. unsigned short tpid2,
  1547. unsigned int port_map)
  1548. {
  1549. struct mvpp2_prs_entry *pe;
  1550. int tid_aux, tid, ai, ret = 0;
  1551. pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
  1552. if (!pe) {
  1553. /* Create new tcam entry */
  1554. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1555. MVPP2_PE_LAST_FREE_TID);
  1556. if (tid < 0)
  1557. return tid;
  1558. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  1559. if (!pe)
  1560. return -ENOMEM;
  1561. /* Set ai value for new double vlan entry */
  1562. ai = mvpp2_prs_double_vlan_ai_free_get(priv);
  1563. if (ai < 0) {
  1564. ret = ai;
  1565. goto error;
  1566. }
  1567. /* Get first single/triple vlan tid */
  1568. for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
  1569. tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
  1570. unsigned int ri_bits;
  1571. if (!priv->prs_shadow[tid_aux].valid ||
  1572. priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
  1573. continue;
  1574. pe->index = tid_aux;
  1575. mvpp2_prs_hw_read(priv, pe);
  1576. ri_bits = mvpp2_prs_sram_ri_get(pe);
  1577. ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
  1578. if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
  1579. ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
  1580. break;
  1581. }
  1582. if (tid >= tid_aux) {
  1583. ret = -ERANGE;
  1584. goto error;
  1585. }
  1586. memset(pe, 0, sizeof(struct mvpp2_prs_entry));
  1587. mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
  1588. pe->index = tid;
  1589. priv->prs_double_vlans[ai] = true;
  1590. mvpp2_prs_match_etype(pe, 0, tpid1);
  1591. mvpp2_prs_match_etype(pe, 4, tpid2);
  1592. mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
  1593. /* Shift 8 bytes - skip 2 vlan tags */
  1594. mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
  1595. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1596. mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
  1597. MVPP2_PRS_RI_VLAN_MASK);
  1598. mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
  1599. MVPP2_PRS_SRAM_AI_MASK);
  1600. mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
  1601. }
  1602. /* Update ports' mask */
  1603. mvpp2_prs_tcam_port_map_set(pe, port_map);
  1604. mvpp2_prs_hw_write(priv, pe);
  1605. error:
  1606. kfree(pe);
  1607. return ret;
  1608. }
  1609. /* IPv4 header parsing for fragmentation and L4 offset */
  1610. static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
  1611. unsigned int ri, unsigned int ri_mask)
  1612. {
  1613. struct mvpp2_prs_entry pe;
  1614. int tid;
  1615. if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
  1616. (proto != IPPROTO_IGMP))
  1617. return -EINVAL;
  1618. /* Fragmented packet */
  1619. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1620. MVPP2_PE_LAST_FREE_TID);
  1621. if (tid < 0)
  1622. return tid;
  1623. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1624. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
  1625. pe.index = tid;
  1626. /* Set next lu to IPv4 */
  1627. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
  1628. mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1629. /* Set L4 offset */
  1630. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
  1631. sizeof(struct iphdr) - 4,
  1632. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  1633. mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
  1634. MVPP2_PRS_IPV4_DIP_AI_BIT);
  1635. mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
  1636. ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
  1637. mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
  1638. mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
  1639. /* Unmask all ports */
  1640. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1641. /* Update shadow table and hw entry */
  1642. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
  1643. mvpp2_prs_hw_write(priv, &pe);
  1644. /* Not fragmented packet */
  1645. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1646. MVPP2_PE_LAST_FREE_TID);
  1647. if (tid < 0)
  1648. return tid;
  1649. pe.index = tid;
  1650. /* Clear ri before updating */
  1651. pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
  1652. pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
  1653. mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
  1654. mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
  1655. mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
  1656. /* Update shadow table and hw entry */
  1657. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
  1658. mvpp2_prs_hw_write(priv, &pe);
  1659. return 0;
  1660. }
  1661. /* IPv4 L3 multicast or broadcast */
  1662. static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
  1663. {
  1664. struct mvpp2_prs_entry pe;
  1665. int mask, tid;
  1666. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1667. MVPP2_PE_LAST_FREE_TID);
  1668. if (tid < 0)
  1669. return tid;
  1670. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1671. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
  1672. pe.index = tid;
  1673. switch (l3_cast) {
  1674. case MVPP2_PRS_L3_MULTI_CAST:
  1675. mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
  1676. MVPP2_PRS_IPV4_MC_MASK);
  1677. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
  1678. MVPP2_PRS_RI_L3_ADDR_MASK);
  1679. break;
  1680. case MVPP2_PRS_L3_BROAD_CAST:
  1681. mask = MVPP2_PRS_IPV4_BC_MASK;
  1682. mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
  1683. mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
  1684. mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
  1685. mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
  1686. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
  1687. MVPP2_PRS_RI_L3_ADDR_MASK);
  1688. break;
  1689. default:
  1690. return -EINVAL;
  1691. }
  1692. /* Finished: go to flowid generation */
  1693. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1694. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1695. mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
  1696. MVPP2_PRS_IPV4_DIP_AI_BIT);
  1697. /* Unmask all ports */
  1698. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1699. /* Update shadow table and hw entry */
  1700. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
  1701. mvpp2_prs_hw_write(priv, &pe);
  1702. return 0;
  1703. }
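/* Worked example (illustrative, assuming MVPP2_PRS_IPV4_MC matches the
* 0xe0/0xf0 pattern): the multicast case keys only on the first DIP
* byte, covering 224.0.0.0/4, while the broadcast case pins all four
* DIP bytes to 0xff, i.e. 255.255.255.255.
*/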
  1704. /* Set entries for protocols over IPv6 */
  1705. static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
  1706. unsigned int ri, unsigned int ri_mask)
  1707. {
  1708. struct mvpp2_prs_entry pe;
  1709. int tid;
  1710. if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
  1711. (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
  1712. return -EINVAL;
  1713. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1714. MVPP2_PE_LAST_FREE_TID);
  1715. if (tid < 0)
  1716. return tid;
  1717. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1718. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
  1719. pe.index = tid;
  1720. /* Finished: go to flowid generation */
  1721. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1722. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1723. mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
  1724. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
  1725. sizeof(struct ipv6hdr) - 6,
  1726. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  1727. mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
  1728. mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
  1729. MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
  1730. /* Unmask all ports */
  1731. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1732. /* Write HW */
  1733. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
  1734. mvpp2_prs_hw_write(priv, &pe);
  1735. return 0;
  1736. }
  1737. /* IPv6 L3 multicast entry */
  1738. static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
  1739. {
  1740. struct mvpp2_prs_entry pe;
  1741. int tid;
  1742. if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
  1743. return -EINVAL;
  1744. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1745. MVPP2_PE_LAST_FREE_TID);
  1746. if (tid < 0)
  1747. return tid;
  1748. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1749. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
  1750. pe.index = tid;
1751. /* Continue with the IPv6 lookup */
  1752. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
  1753. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
  1754. MVPP2_PRS_RI_L3_ADDR_MASK);
  1755. mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
  1756. MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
  1757. /* Shift back to IPv6 NH */
  1758. mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1759. mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
  1760. MVPP2_PRS_IPV6_MC_MASK);
  1761. mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
  1762. /* Unmask all ports */
  1763. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1764. /* Update shadow table and hw entry */
  1765. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
  1766. mvpp2_prs_hw_write(priv, &pe);
  1767. return 0;
  1768. }
  1769. /* Parser per-port initialization */
  1770. static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
  1771. int lu_max, int offset)
  1772. {
  1773. u32 val;
  1774. /* Set lookup ID */
  1775. val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
  1776. val &= ~MVPP2_PRS_PORT_LU_MASK(port);
  1777. val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
  1778. mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
  1779. /* Set maximum number of loops for packet received from port */
  1780. val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
  1781. val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
  1782. val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
  1783. mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
  1784. /* Set initial offset for packet header extraction for the first
  1785. * searching loop
  1786. */
  1787. val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
  1788. val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
  1789. val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
  1790. mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
  1791. }
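/* Usage sketch, not part of the original driver: ports are normally
* pointed at the Marvell Header lookup first, starting at packet
* offset 0. The loop limit used here is an illustrative value, and the
* helper name is hypothetical.
*/
static inline void mvpp2_prs_port_init_sketch(struct mvpp2 *priv, int port)
{
mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH, 8, 0);
}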
  1792. /* Default flow entries initialization for all ports */
  1793. static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
  1794. {
  1795. struct mvpp2_prs_entry pe;
  1796. int port;
  1797. for (port = 0; port < MVPP2_MAX_PORTS; port++) {
  1798. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1799. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1800. pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
  1801. /* Mask all ports */
  1802. mvpp2_prs_tcam_port_map_set(&pe, 0);
  1803. /* Set flow ID*/
  1804. mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
  1805. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
  1806. /* Update shadow table and hw entry */
  1807. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
  1808. mvpp2_prs_hw_write(priv, &pe);
  1809. }
  1810. }
  1811. /* Set default entry for Marvell Header field */
  1812. static void mvpp2_prs_mh_init(struct mvpp2 *priv)
  1813. {
  1814. struct mvpp2_prs_entry pe;
  1815. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1816. pe.index = MVPP2_PE_MH_DEFAULT;
  1817. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
  1818. mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
  1819. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1820. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
  1821. /* Unmask all ports */
  1822. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1823. /* Update shadow table and hw entry */
  1824. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
  1825. mvpp2_prs_hw_write(priv, &pe);
  1826. }
1827. /* Set default entries (placeholders) for promiscuous, non-promiscuous and
1828. * multicast MAC addresses
1829. */
  1830. static void mvpp2_prs_mac_init(struct mvpp2 *priv)
  1831. {
  1832. struct mvpp2_prs_entry pe;
  1833. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1834. /* Non-promiscuous mode for all ports - DROP unknown packets */
  1835. pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
  1836. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
  1837. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
  1838. MVPP2_PRS_RI_DROP_MASK);
  1839. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1840. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1841. /* Unmask all ports */
  1842. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1843. /* Update shadow table and hw entry */
  1844. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  1845. mvpp2_prs_hw_write(priv, &pe);
1846. /* Placeholders only - no ports */
  1847. mvpp2_prs_mac_drop_all_set(priv, 0, false);
  1848. mvpp2_prs_mac_promisc_set(priv, 0, false);
  1849. mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
  1850. mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
  1851. }
  1852. /* Set default entries for various types of dsa packets */
  1853. static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
  1854. {
  1855. struct mvpp2_prs_entry pe;
1856. /* Untagged EDSA entry - placeholder */
1857. mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
1858. MVPP2_PRS_EDSA);
1859. /* Tagged EDSA entry - placeholder */
1860. mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
1861. /* Untagged DSA entry - placeholder */
1862. mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
1863. MVPP2_PRS_DSA);
1864. /* Tagged DSA entry - placeholder */
1865. mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
1866. /* Untagged EDSA ethertype entry - placeholder */
1867. mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
1868. MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
1869. /* Tagged EDSA ethertype entry - placeholder */
1870. mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
1871. MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
1872. /* Untagged DSA ethertype entry */
1873. mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
1874. MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
1875. /* Tagged DSA ethertype entry */
1876. mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
1877. MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
  1878. /* Set default entry, in case DSA or EDSA tag not found */
  1879. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1880. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
  1881. pe.index = MVPP2_PE_DSA_DEFAULT;
  1882. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  1883. /* Shift 0 bytes */
  1884. mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1885. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  1886. /* Clear all sram ai bits for next iteration */
  1887. mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
  1888. /* Unmask all ports */
  1889. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1890. mvpp2_prs_hw_write(priv, &pe);
  1891. }
  1892. /* Match basic ethertypes */
  1893. static int mvpp2_prs_etype_init(struct mvpp2 *priv)
  1894. {
  1895. struct mvpp2_prs_entry pe;
  1896. int tid;
  1897. /* Ethertype: PPPoE */
  1898. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1899. MVPP2_PE_LAST_FREE_TID);
  1900. if (tid < 0)
  1901. return tid;
  1902. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1903. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  1904. pe.index = tid;
  1905. mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
  1906. mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
  1907. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1908. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
  1909. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
  1910. MVPP2_PRS_RI_PPPOE_MASK);
  1911. /* Update shadow table and hw entry */
  1912. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  1913. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  1914. priv->prs_shadow[pe.index].finish = false;
  1915. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
  1916. MVPP2_PRS_RI_PPPOE_MASK);
  1917. mvpp2_prs_hw_write(priv, &pe);
  1918. /* Ethertype: ARP */
  1919. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1920. MVPP2_PE_LAST_FREE_TID);
  1921. if (tid < 0)
  1922. return tid;
  1923. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1924. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  1925. pe.index = tid;
  1926. mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
1927. /* Generate flow in the next iteration */
  1928. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1929. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1930. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
  1931. MVPP2_PRS_RI_L3_PROTO_MASK);
  1932. /* Set L3 offset */
  1933. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
  1934. MVPP2_ETH_TYPE_LEN,
  1935. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  1936. /* Update shadow table and hw entry */
  1937. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  1938. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  1939. priv->prs_shadow[pe.index].finish = true;
  1940. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
  1941. MVPP2_PRS_RI_L3_PROTO_MASK);
  1942. mvpp2_prs_hw_write(priv, &pe);
  1943. /* Ethertype: LBTD */
  1944. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1945. MVPP2_PE_LAST_FREE_TID);
  1946. if (tid < 0)
  1947. return tid;
  1948. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1949. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  1950. pe.index = tid;
  1951. mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1952. /* Generate flow in the next iteration */
  1953. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1954. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1955. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
  1956. MVPP2_PRS_RI_UDF3_RX_SPECIAL,
  1957. MVPP2_PRS_RI_CPU_CODE_MASK |
  1958. MVPP2_PRS_RI_UDF3_MASK);
  1959. /* Set L3 offset */
  1960. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
  1961. MVPP2_ETH_TYPE_LEN,
  1962. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  1963. /* Update shadow table and hw entry */
  1964. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  1965. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  1966. priv->prs_shadow[pe.index].finish = true;
  1967. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
  1968. MVPP2_PRS_RI_UDF3_RX_SPECIAL,
  1969. MVPP2_PRS_RI_CPU_CODE_MASK |
  1970. MVPP2_PRS_RI_UDF3_MASK);
  1971. mvpp2_prs_hw_write(priv, &pe);
  1972. /* Ethertype: IPv4 without options */
  1973. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1974. MVPP2_PE_LAST_FREE_TID);
  1975. if (tid < 0)
  1976. return tid;
  1977. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1978. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  1979. pe.index = tid;
  1980. mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
  1981. mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
  1982. MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
  1983. MVPP2_PRS_IPV4_HEAD_MASK |
  1984. MVPP2_PRS_IPV4_IHL_MASK);
  1985. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
  1986. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
  1987. MVPP2_PRS_RI_L3_PROTO_MASK);
  1988. /* Skip eth_type + 4 bytes of IP header */
  1989. mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
  1990. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1991. /* Set L3 offset */
  1992. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
  1993. MVPP2_ETH_TYPE_LEN,
  1994. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  1995. /* Update shadow table and hw entry */
  1996. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  1997. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  1998. priv->prs_shadow[pe.index].finish = false;
  1999. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
  2000. MVPP2_PRS_RI_L3_PROTO_MASK);
  2001. mvpp2_prs_hw_write(priv, &pe);
  2002. /* Ethertype: IPv4 with options */
  2003. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  2004. MVPP2_PE_LAST_FREE_TID);
  2005. if (tid < 0)
  2006. return tid;
  2007. pe.index = tid;
  2008. /* Clear tcam data before updating */
  2009. pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
  2010. pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
  2011. mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
  2012. MVPP2_PRS_IPV4_HEAD,
  2013. MVPP2_PRS_IPV4_HEAD_MASK);
  2014. /* Clear ri before updating */
  2015. pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
  2016. pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
  2017. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
  2018. MVPP2_PRS_RI_L3_PROTO_MASK);
  2019. /* Update shadow table and hw entry */
  2020. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  2021. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  2022. priv->prs_shadow[pe.index].finish = false;
  2023. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
  2024. MVPP2_PRS_RI_L3_PROTO_MASK);
  2025. mvpp2_prs_hw_write(priv, &pe);
  2026. /* Ethertype: IPv6 without options */
  2027. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  2028. MVPP2_PE_LAST_FREE_TID);
  2029. if (tid < 0)
  2030. return tid;
  2031. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  2032. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  2033. pe.index = tid;
  2034. mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
  2035. /* Skip DIP of IPV6 header */
  2036. mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
  2037. MVPP2_MAX_L3_ADDR_SIZE,
  2038. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  2039. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
  2040. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
  2041. MVPP2_PRS_RI_L3_PROTO_MASK);
  2042. /* Set L3 offset */
  2043. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
  2044. MVPP2_ETH_TYPE_LEN,
  2045. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  2046. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  2047. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  2048. priv->prs_shadow[pe.index].finish = false;
  2049. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
  2050. MVPP2_PRS_RI_L3_PROTO_MASK);
  2051. mvpp2_prs_hw_write(priv, &pe);
  2052. /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
  2053. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  2054. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  2055. pe.index = MVPP2_PE_ETH_TYPE_UN;
  2056. /* Unmask all ports */
  2057. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2058. /* Generate flow in the next iteration */
  2059. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  2060. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  2061. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
  2062. MVPP2_PRS_RI_L3_PROTO_MASK);
2063. /* Set the L3 offset even though the L3 protocol is unknown */
  2064. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
  2065. MVPP2_ETH_TYPE_LEN,
  2066. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  2067. /* Update shadow table and hw entry */
  2068. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  2069. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  2070. priv->prs_shadow[pe.index].finish = true;
  2071. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
  2072. MVPP2_PRS_RI_L3_PROTO_MASK);
  2073. mvpp2_prs_hw_write(priv, &pe);
  2074. return 0;
  2075. }
  2076. /* Configure vlan entries and detect up to 2 successive VLAN tags.
  2077. * Possible options:
  2078. * 0x8100, 0x88A8
  2079. * 0x8100, 0x8100
  2080. * 0x8100
  2081. * 0x88A8
  2082. */
  2083. static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
  2084. {
  2085. struct mvpp2_prs_entry pe;
  2086. int err;
2087. priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2088. MVPP2_PRS_DBL_VLANS_MAX,
2089. sizeof(bool), GFP_KERNEL);
  2090. if (!priv->prs_double_vlans)
  2091. return -ENOMEM;
  2092. /* Double VLAN: 0x8100, 0x88A8 */
  2093. err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
  2094. MVPP2_PRS_PORT_MASK);
  2095. if (err)
  2096. return err;
  2097. /* Double VLAN: 0x8100, 0x8100 */
  2098. err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
  2099. MVPP2_PRS_PORT_MASK);
  2100. if (err)
  2101. return err;
  2102. /* Single VLAN: 0x88a8 */
  2103. err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
  2104. MVPP2_PRS_PORT_MASK);
  2105. if (err)
  2106. return err;
  2107. /* Single VLAN: 0x8100 */
  2108. err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
  2109. MVPP2_PRS_PORT_MASK);
  2110. if (err)
  2111. return err;
  2112. /* Set default double vlan entry */
  2113. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  2114. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  2115. pe.index = MVPP2_PE_VLAN_DBL;
  2116. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  2117. /* Clear ai for next iterations */
  2118. mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
  2119. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
  2120. MVPP2_PRS_RI_VLAN_MASK);
  2121. mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
  2122. MVPP2_PRS_DBL_VLAN_AI_BIT);
  2123. /* Unmask all ports */
  2124. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  2125. /* Update shadow table and hw entry */
  2126. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
  2127. mvpp2_prs_hw_write(priv, &pe);
  2128. /* Set default vlan none entry */
  2129. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  2130. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  2131. pe.index = MVPP2_PE_VLAN_NONE;
  2132. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  2133. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
  2134. MVPP2_PRS_RI_VLAN_MASK);
  2135. /* Unmask all ports */
  2136. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  2137. /* Update shadow table and hw entry */
  2138. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
  2139. mvpp2_prs_hw_write(priv, &pe);
  2140. return 0;
  2141. }
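
/* How the two-stage VLAN detection above fits together (a reading of
 * the code, not chip documentation): mvpp2_prs_vlan_add() is called
 * with MVPP2_PRS_SINGLE_VLAN_AI and the default entry at
 * MVPP2_PE_VLAN_DBL matches on MVPP2_PRS_DBL_VLAN_AI_BIT, which
 * suggests the per-tag entries communicate through the AI byte: a
 * first tag leaves an AI mark, a second tag seen in the VLAN stage
 * then hits the default double-VLAN entry and is classified as
 * MVPP2_PRS_RI_VLAN_DOUBLE before the AI bits are cleared for the next
 * iteration. MVPP2_PE_VLAN_NONE is the fall-through when no tag
 * matched at all.
 */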

/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
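
/* Note that the "IPv4 over PPPoE without options" entry above reuses
 * the still-populated pe from the with-options entry instead of
 * memset()ing it: only the IHL byte match and the result-info words
 * are overridden, so the shift, next-lookup and L3 offset programmed
 * for the with-options case are deliberately shared.
 */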

/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
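
/* As the AI bit updates in the two default entries show, the IPv4
 * stage is walked twice: the first pass (AI bit clear in the TCAM)
 * classifies the L4 protocol, loops back into MVPP2_PRS_LU_IP4 and
 * sets MVPP2_PRS_IPV4_DIP_AI_BIT in the SRAM AI; the second pass (AI
 * bit set) classifies the destination address and ends the lookup. The
 * unknown-protocol and unicast defaults above are the fallbacks for
 * those two passes respectively.
 */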

/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is a case similar to 6-TCP or
	 * 17-UDP. Result Info: UDF7=1, DS lite
	 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
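
/* A note on ordering: the fixed-index entries (MVPP2_PE_*) can be
 * written in any order, but the routines above that call
 * mvpp2_prs_tcam_first_free() all share one free range between
 * MVPP2_PE_FIRST_FREE_TID and MVPP2_PE_LAST_FREE_TID, so the init
 * sequence also fixes where the dynamic ethertype, VLAN, PPPoE and
 * IPv6 entries land relative to each other; assuming lower-index
 * entries take precedence on a multiple match, as TCAM lookups
 * usually do, that relative placement matters.
 */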

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}

	kfree(pe);
	return NULL;
}

/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
			     MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
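
/* Example (sketch, with a made-up address): to start accepting frames
 * whose DA is 02:00:00:00:00:01 on port 0, and later stop, one would
 * call:
 *
 *	static const u8 da[ETH_ALEN] = { 0x02, 0x00, 0x00,
 *					 0x00, 0x00, 0x01 };
 *
 *	err = mvpp2_prs_mac_da_accept(priv, 0, da, true);
 *	...
 *	err = mvpp2_prs_mac_da_accept(priv, 0, da, false);
 *
 * Removing the last port from an entry's port map invalidates the
 * whole TCAM entry, as the pmap == 0 branch above shows.
 */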

static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}

/* Delete all of the port's simple (not range) multicast entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}

static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}

/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists yet - create one */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}
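
/* Unlike the MAC and ethertype helpers, flow entries are allocated by
 * passing the free range to mvpp2_prs_tcam_first_free() in reverse
 * (last TID down to first), so per-port flow entries grow from the top
 * of the shared range while the other dynamic entries grow from the
 * bottom, keeping the two families from colliding until the range is
 * actually full.
 */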

/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}

static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	int size_bytes;
	u32 val;

	size_bytes = sizeof(u32) * size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    bm_pool->phys_addr);
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;
	atomic_set(&bm_pool->in_use, 0);

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		dma_addr_t buf_phys_addr;
		u32 vaddr;

		/* Get buffer virtual address (indirect access) */
		buf_phys_addr = mvpp2_read(priv,
					   MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);

		dma_unmap_single(dev, buf_phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		if (!vaddr)
			break;
		dev_kfree_skb_any((struct sk_buff *)vaddr);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
			  bm_pool->virt_addr,
			  bm_pool->phys_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
	val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
		MVPP2_RXQ_POOL_LONG_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
	val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
		MVPP2_RXQ_POOL_SHORT_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Allocate skb for BM pool */
static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
				       struct mvpp2_bm_pool *bm_pool,
				       dma_addr_t *buf_phys_addr,
				       gfp_t gfp_mask)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
	if (!skb)
		return NULL;

	phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
				   MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	*buf_phys_addr = phys_addr;

	return skb;
}

/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
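
/* Worked example for the cookie helpers: assuming for illustration
 * that MVPP2_BM_COOKIE_POOL_OFFS is 8, mvpp2_bm_cookie_pool_set(0, 3)
 * yields 0x300 and mvpp2_bm_cookie_pool_get(0x300) returns 3. The set
 * helper first clears the 8-bit pool field so it can be called on a
 * cookie that already carries a CPU number in another field (see
 * mvpp2_bm_cookie_build() further down).
 */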

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     u32 buf_phys_addr, u32 buf_virt_addr)
{
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
}

/* Release multicast buffer */
static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
				 u32 buf_phys_addr, u32 buf_virt_addr,
				 int mc_id)
{
	u32 val = 0;

	val |= (mc_id & MVPP2_BM_MC_ID_MASK);
	mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);

	mvpp2_bm_pool_put(port, pool,
			  buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
			  buf_virt_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      u32 phys_addr, u32 cookie)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct sk_buff *skb;
	int i, buf_size, total_size;
	u32 bm;
	dma_addr_t phys_addr;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
	for (i = 0; i < buf_num; i++) {
		skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
		if (!skb)
			break;

		mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;
	bm_pool->in_use_thresh = bm_pool->buf_num / 4;

	netdev_dbg(port->dev,
		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "%s pool %d: %d of %d buffers added\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, i, buf_num);
	return i;
}
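
/* A buffer is handed to the BM through the register pair in
 * mvpp2_bm_pool_put() above: the virtual address (here the skb pointer
 * itself, cast to u32) is written first, and the write to the per-pool
 * physical release register commits the pair, which appears to be why
 * the two writes are ordered that way. The cookie comes back with the
 * RX descriptor, letting the RX path recover the skb without any
 * lookup table.
 */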

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					 MVPP2_BM_SWF_LONG,
					 port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}
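
/* An MTU change therefore drains and refills the whole long pool: if
 * any buffer is still held by hardware, mvpp2_bm_bufs_free() cannot
 * reclaim it, buf_num stays non-zero and the function bails out with
 * -EIO rather than resizing a pool that still has buffers in flight.
 */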

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}

/* Mask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
		    (MVPP2_CAUSE_MISC_SUM_MASK |
		     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
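
/* mvpp2_interrupts_mask()/unmask() take a void *arg and act on "the
 * current CPU" because the RX_TX mask register is banked per CPU; the
 * signature matches the callback expected by on_each_cpu() /
 * smp_call_function_single(), which is presumably how the rest of the
 * driver runs them on each core in turn.
 */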

/* Port configuration routines */
static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through: RGMII also needs the PCS disabled */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* De-assert the reset bit, then busy-wait until the port reports
	 * it has left reset.
	 */
	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	/* Configure port to loopback if needed */
	if (port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port);

	/* Update TX FIFO MIN Threshold */
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	/* Min. TX threshold must be less than minimal packet length */
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* By default, mask all interrupts on all present CPUs */
	mvpp2_interrupts_disable(port);
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
{
	int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	int cpu = smp_processor_id();

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}

/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}
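
/* The double check above is a small optimization: the software count
 * is allowed to go stale (it can only overestimate occupancy, since HW
 * drains the queue behind our back), so the per-CPU status register is
 * read only when the cached count first suggests the queue is full,
 * and the check is repeated with the refreshed value before giving up.
 */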

/* Reserved Tx descriptors allocation request */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */
	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;

	return 0;
}
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
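
/* Example (values illustrative): for a TCP/IPv4 frame with a plain
 * 14-byte Ethernet header and a 20-byte IP header, the caller in
 * mvpp2_skb_tx_csum() below ends up passing l3_offs = 14,
 * l3_proto = swab16(ETH_P_IP) (i.e. skb->protocol on a little-endian
 * system), ip_hdr_len = 5 (in 32-bit words) and l4_proto = IPPROTO_TCP,
 * which enables both the IPv4 and the TCP checksum generators in the
 * returned command word.
 */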
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}
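
/* Because MVPP2_TXQ_SENT_REG is clear-on-read, the loop above discards
 * the value on purpose: the read itself is the reset. The function takes
 * a void * argument so it can be fanned out to every CPU, as done later
 * in this file:
 *
 *	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
 */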
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32	val, size, mtu;
	int	txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
/* Set the number of packets that will be received before an Rx interrupt
 * is generated by the HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 pkts)
{
	u32 val;

	val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);

	rxq->pkts_coal = pkts;
}

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 usec)
{
	u32 val;

	val = (port->priv->tclk / USEC_PER_SEC) * usec;
	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);

	rxq->time_coal = usec;
}
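
/* The ISR threshold register counts core clock (tclk) cycles, hence the
 * microsecond-to-cycle conversion above. Worked example (clock rate
 * illustrative): with tclk = 250000000 (250 MHz), usec = 100 gives
 * val = (250000000 / 1000000) * 100 = 25000 cycles.
 */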
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
				 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
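
/* Both lookup helpers map a cause bitmap to a queue by taking its highest
 * set bit (fls(cause) - 1). For example, cause = 0x5 (queues 0 and 2
 * pending) selects queue 2 first; callers clear the serviced bit and
 * loop, so lower-numbered queues are picked up on later iterations (see
 * mvpp2_tx_done() and mvpp2_poll() below).
 */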
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = this_cpu_ptr(txq->pcpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				desc_num * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_phys, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address */
	/* indirect access */
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
		    aggr_txq->descs_phys);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}
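
/* Note that aggregated TXQs are per-CPU objects, which is why the
 * registers above are indexed by cpu rather than by port. The "no reset"
 * workaround reads the current HW index back instead of assuming the
 * queue starts at slot 0, keeping the software pointer in sync with the
 * hardware one across re-initialization.
 */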
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(rx_desc);

		mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
				  rx_desc->buf_cookie);
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
					     MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq/2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc(txq_pcpu->size *
					  sizeof(struct mvpp2_txq_pcpu_buf),
					  GFP_KERNEL);
		if (!txq_pcpu->buffs)
			goto error;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}

	return 0;

error:
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);
	}

	dma_free_coherent(port->dev->dev.parent,
			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
			  txq->descs, txq->descs_phys);

	return -ENOMEM;
}
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
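
/* The drain sequence above follows the usual quiesce pattern: enable
 * drain mode, poll the pending counter with a bounded busy-wait (1 ms
 * steps up to MVPP2_TX_PENDING_TIMEOUT_MSEC), then disable drain mode
 * again before releasing the per-CPU buffers. mdelay() is acceptable
 * here because this only runs on slow paths such as ifdown or ring
 * reconfiguration.
 */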
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;

	mvpp2_interrupts_disable(port);

	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

/* Adjust link */
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			u32 val;

			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed  = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
		phy_print_status(phydev);
	}
}
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << txq_number) - 1;
	tx_todo = mvpp2_tx_done(port, cause);

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		    (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
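
/* CHECKSUM_UNNECESSARY is only reported when all three conditions hold:
 * a sane L3 header (IPv4 without header errors, or IPv6), an L4 protocol
 * the hardware can verify (TCP or UDP), and the MVPP2_RXD_L4_CSUM_OK
 * status bit. Every other combination falls through to CHECKSUM_NONE,
 * leaving checksum validation to the network stack in software.
 */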
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, int is_recycle)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	if (is_recycle &&
	    (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
		return 0;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	atomic_dec(&bm_pool->in_use);
	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
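
/* Note that ip_hdr_len is passed in 32-bit words, as expected by
 * mvpp2_txq_desc_csum(): ip4h->ihl is already word-sized, and the IPv6
 * network header length is converted with ">> 2". A standard 20-byte
 * IPv4 header therefore yields ip_hdr_len = 5.
 */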
static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
			      struct mvpp2_rx_desc *rx_desc)
{
	struct mvpp2_buff_hdr *buff_hdr;
	struct sk_buff *skb;
	u32 rx_status = rx_desc->status;
	u32 buff_phys_addr;
	u32 buff_virt_addr;
	u32 buff_phys_addr_next;
	u32 buff_virt_addr_next;
	int mc_id;
	int pool_id;

	pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	buff_phys_addr = rx_desc->buf_phys_addr;
	buff_virt_addr = rx_desc->buf_cookie;

	do {
		skb = (struct sk_buff *)buff_virt_addr;
		buff_hdr = (struct mvpp2_buff_hdr *)skb->head;

		mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);

		buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
		buff_virt_addr_next = buff_hdr->next_buff_virt_addr;

		/* Release buffer */
		mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
				     buff_virt_addr, mc_id);

		buff_phys_addr = buff_phys_addr_next;
		buff_virt_addr = buff_virt_addr_next;

	} while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
}
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
		    struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		dma_addr_t phys_addr;
		u32 bm, rx_status;
		int pool, rx_bytes, err;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
		phys_addr = rx_desc->buf_phys_addr;

		bm = mvpp2_bm_cookie_build(rx_desc);
		pool = mvpp2_bm_cookie_pool_get(bm);
		bm_pool = &port->priv->bm_pools[pool];
		/* Check if buffer header is used */
		if (rx_status & MVPP2_RXD_BUF_HDR) {
			mvpp2_buff_hdr_rx(port, rx_desc);
			continue;
		}

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * carried in the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
					  rx_desc->buf_cookie);
			continue;
		}

		skb = (struct sk_buff *)rx_desc->buf_cookie;

		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;
		atomic_inc(&bm_pool->in_use);

		skb_reserve(skb, MVPP2_MH_SIZE);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}
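
/* The wmb() just before mvpp2_rxq_status_update() orders this CPU's
 * buffer and descriptor writes against the register write that hands
 * descriptors back to the hardware, so the HW cannot observe a recycled
 * descriptor before the stores backing it are visible.
 */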
static inline void
tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	dma_unmap_single(dev, desc->buf_phys_addr,
			 desc->data_size, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_phys_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		tx_desc->phys_txq = txq->id;
		tx_desc->data_size = frag->size;

		buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
					       tx_desc->data_size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
			mvpp2_txq_desc_put(txq);
			goto error;
		}

		tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
		tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVPP2_TXD_L_DESC;
			mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
	}

	return -ENOMEM;
}
/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_phys_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	tx_desc->phys_txq = txq->id;
	tx_desc->data_size = skb_headlen(skb);

	buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
				       tx_desc->data_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}
	tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
	tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

	txq_pcpu->reserved_num -= frags;
	txq_pcpu->count += frags;
	aggr_txq->count += frags;

	/* Enable transmit */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, frags);

	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netif_tx_stop_queue(nq);
	}
out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
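
/* Flow-control note: the queue is stopped once fewer than
 * MAX_SKB_FRAGS + 1 free descriptors remain, i.e. the worst case a
 * single skb (linear part plus all fragments) can consume, and
 * mvpp2_txq_done() wakes it with the same bound, so transmission resumes
 * exactly when a worst-case packet is guaranteed to fit again.
 */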
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_read(port->priv,
				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;

	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	/* Process RX packets */
	cause_rx |= port->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete(napi);

		mvpp2_interrupts_enable(port);
	}
	port->pending_cause_rx = cause_rx;
	return rx_done;
}
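
/* If the NAPI budget runs out before every Rx queue is drained, the
 * unserviced cause bits are parked in port->pending_cause_rx and OR-ed
 * back in on the next poll, so no queue is starved. napi_complete() and
 * interrupt re-enabling happen only when some budget is left over,
 * meaning all pending work fit in this pass.
 */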
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	mvpp2_gmac_max_rx_size_set(port);
	mvpp2_txp_max_tx_size_set(port);

	napi_enable(&port->napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	mvpp2_port_enable(port);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	napi_disable(&port->napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	phy_stop(ndev->phydev);
}
/* Return positive if MTU is valid */
static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}

static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
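
/* Worked example: a requested rx_pending of 1000 is not 16-aligned and
 * is rounded up to 1008, while tx_pending = 1000 is rounded up to the
 * next multiple of 32, i.e. 1024. Requests above MVPP2_MAX_RXD or
 * MVPP2_MAX_TXD are clamped to the maximum instead of being aligned.
 */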
static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}

static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->link    = 0;
	port->duplex  = 0;
	port->speed   = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	phy_disconnect(ndev->phydev);
}
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
	if (err) {
		netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
		goto err_cleanup_txqs;
	}

	/* The link is down by default */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);

	mvpp2_start_dev(port);

	return 0;

err_free_irq:
	free_irq(port->irq, port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);

	free_irq(port->irq, port);
	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_cancel(&port_pcpu->tx_done_timer);
		port_pcpu->timer_scheduled = false;
		tasklet_kill(&port_pcpu->tx_done_tasklet);
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}

static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

retry:
	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all port->id's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (!allmulti) {
		netdev_for_each_mc_addr(ha, dev) {
			if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
				allmulti = true;
				goto retry;
			}
		}
	}
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	mtu = mvpp2_check_mtu_valid(dev, mtu);
	if (mtu < 0) {
		err = mtu;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}
static struct rtnl_link_stats64 *
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;

	return stats;
}
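
/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair
 * re-reads a CPU's counters whenever a writer updated them mid-copy,
 * yielding a tear-free 64-bit snapshot on 32-bit systems without adding
 * a lock to the hot Tx/Rx paths. The error counters are not per-CPU and
 * are copied straight from dev->stats.
 */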
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}
/* Ethtool methods */

/* Set interrupt coalescing for ethtool */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
		mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
	}

	return 0;
}

/* Get interrupt coalescing for ethtool */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}

static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}

static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters\n");
	return err;
}
/* Device ops */

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}

/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Configure Rx queue group interrupt for this port */
	mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < txq_number; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}

/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv,
			    int *next_first_rxq)
{
	struct device_node *phy_node;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct net_device *dev;
	struct resource *res;
	const char *dt_mac_addr;
	const char *mac_from;
	char hw_mac_addr[ETH_ALEN];
	u32 id;
	int features;
	int phy_mode;
	int priv_common_regs_num = 2;
	int err, i, cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
				 rxq_number);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "missing phy\n");
		err = -ENODEV;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);

	port->irq = irq_of_parse_and_map(port_node, 0);
	if (port->irq <= 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->priv = priv;
	port->id = id;
	port->first_rxq = *next_first_rxq;
	port->phy_node = phy_node;
	port->phy_interface = phy_mode;

	res = platform_get_resource(pdev, IORESOURCE_MEM,
				    priv_common_regs_num + id);
	port->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(port->base)) {
		err = PTR_ERR(port->base);
		goto err_free_irq;
	}

	/* Alloc per-cpu stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		mvpp2_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	port->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}
	mvpp2_port_power_up(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}
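
	/*
	 * Per-CPU deferred TX completion machinery: a pinned hrtimer
	 * (mvpp2_hr_timer_cb) schedules a tasklet (mvpp2_tx_proc_cb)
	 * that reclaims descriptors for packets whose transmission has
	 * completed.
	 */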
	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_PINNED);
		port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
		port_pcpu->timer_scheduled = false;

		tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
			     (unsigned long)dev);
	}

	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
	features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	/* Increment the first Rx queue number to be used by the next port */
	*next_first_rxq += rxq_number;
	priv->port_list[id] = port;
	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}
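
/*
 * Illustrative device-tree port node as consumed by mvpp2_port_probe()
 * above. The property names ("phy", "phy-mode", "port-id",
 * "marvell,loopback") are exactly those the function reads; the node
 * name, unit address and interrupt specifier are made-up placeholders,
 * not taken from a real board file:
 *
 *	ethernet-port@0 {
 *		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
 *		port-id = <0>;
 *		phy = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 */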

/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}

/* Initialize decoding windows */
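/*
 * Register layout used below: each window's BASE register packs the
 * window base (bits 31:16), the MBUS attribute (bits 15:8) and the
 * DRAM target ID (bits 7:0); the SIZE register keeps bits 31:16 of
 * (size - 1); a set bit in MVPP2_BASE_ADDR_ENABLE enables the
 * corresponding window.
 */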
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

/* Initialize Rx FIFOs */
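/*
 * All ports get the same fixed data/attribute FIFO allocation; the
 * final write (0x1 to MVPP2_RX_FIFO_INIT_REG) then triggers the
 * hardware's own FIFO initialization, judging by the register name.
 */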
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints */
	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	/* Disable HW PHY polling */
	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;
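
	/*
	 * One aggregated TXQ per present CPU: the TX path enqueues into
	 * its local CPU's aggregated descriptor ring, which is shared
	 * by all ports, so these rings are sized once here rather than
	 * per port.
	 */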
	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx FIFO init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);

	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	int port_count, first_rxq;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->lms_base))
		return PTR_ERR(priv->lms_base);

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_gop_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_gop_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(struct mvpp2_port *),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_gop_clk;
	}

	/* Initialize ports */
	first_rxq = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
		if (err < 0)
			goto err_gop_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
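
/*
 * Resource layout assumed by the probe path (mirroring what the code
 * reads rather than a formal binding description): MEM resource 0 is
 * the packet-processor register window (priv->base), MEM resource 1
 * the LMS window (priv->lms_base), and MEM resources 2..N the per-port
 * windows mapped in mvpp2_port_probe(); the two clocks are looked up
 * by name as "pp_clk" and "gop_clk".
 */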

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_phys);
	}

	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{ .compatible = "marvell,armada-375-pp2" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");