chip.c 453 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519
  1. /*
  2. * Copyright(c) 2015 - 2018 Intel Corporation.
  3. *
  4. * This file is provided under a dual BSD/GPLv2 license. When using or
  5. * redistributing this file, you may do so under either license.
  6. *
  7. * GPL LICENSE SUMMARY
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * BSD LICENSE
  19. *
  20. * Redistribution and use in source and binary forms, with or without
  21. * modification, are permitted provided that the following conditions
  22. * are met:
  23. *
  24. * - Redistributions of source code must retain the above copyright
  25. * notice, this list of conditions and the following disclaimer.
  26. * - Redistributions in binary form must reproduce the above copyright
  27. * notice, this list of conditions and the following disclaimer in
  28. * the documentation and/or other materials provided with the
  29. * distribution.
  30. * - Neither the name of Intel Corporation nor the names of its
  31. * contributors may be used to endorse or promote products derived
  32. * from this software without specific prior written permission.
  33. *
  34. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45. *
  46. */
  47. /*
  48. * This file contains all of the code that is specific to the HFI chip
  49. */
  50. #include <linux/pci.h>
  51. #include <linux/delay.h>
  52. #include <linux/interrupt.h>
  53. #include <linux/module.h>
  54. #include "hfi.h"
  55. #include "trace.h"
  56. #include "mad.h"
  57. #include "pio.h"
  58. #include "sdma.h"
  59. #include "eprom.h"
  60. #include "efivar.h"
  61. #include "platform.h"
  62. #include "aspm.h"
  63. #include "affinity.h"
  64. #include "debugfs.h"
  65. #include "fault.h"
  66. #define NUM_IB_PORTS 1
  67. uint kdeth_qp;
  68. module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
  69. MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
  70. uint num_vls = HFI1_MAX_VLS_SUPPORTED;
  71. module_param(num_vls, uint, S_IRUGO);
  72. MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
  73. /*
  74. * Default time to aggregate two 10K packets from the idle state
  75. * (timer not running). The timer starts at the end of the first packet,
  76. * so only the time for one 10K packet and header plus a bit extra is needed.
  77. * 10 * 1024 payload bytes + 64 header bytes = 10304 bytes
  78. * 10304 bytes / 12.5 GB/s = 824.32 ns
  79. */
  80. uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
  81. module_param(rcv_intr_timeout, uint, S_IRUGO);
  82. MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
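/*
 * Illustrative sketch, not part of the original driver: the default above
 * can be re-derived from the sizing comment, i.e. one 10K packet plus a
 * 64-byte header moved at 12.5 GB/s takes about 824 ns, to which the 16 ns
 * coalescing allowance is added.  All constants here are taken from that
 * comment, not from any CSR definition.
 */
static inline unsigned int example_rcv_intr_timeout_ns(void)
{
	unsigned int bytes = 10 * 1024 + 64;	/* one 10K packet + header */
	unsigned int rate_mb_per_s = 12500;	/* 12.5 GB/s in MB/s */

	/* bytes / (MB/s) * 1000 = ns: 10304000 / 12500 = 824 (truncated) */
	return bytes * 1000 / rate_mb_per_s;
}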
  83. uint rcv_intr_count = 16; /* same as qib */
  84. module_param(rcv_intr_count, uint, S_IRUGO);
  85. MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
  86. ushort link_crc_mask = SUPPORTED_CRCS;
  87. module_param(link_crc_mask, ushort, S_IRUGO);
  88. MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
  89. uint loopback;
  90. module_param_named(loopback, loopback, uint, S_IRUGO);
  91. MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
  92. /* Other driver tunables */
  93. uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
  94. static ushort crc_14b_sideband = 1;
  95. static uint use_flr = 1;
  96. uint quick_linkup; /* skip LNI */
  97. struct flag_table {
  98. u64 flag; /* the flag */
  99. char *str; /* description string */
  100. u16 extra; /* extra information */
  101. u16 unused0;
  102. u32 unused1;
  103. };
  104. /* str must be a string constant */
  105. #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
  106. #define FLAG_ENTRY0(str, flag) {flag, str, 0}
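/*
 * Illustrative sketch, not part of the original driver: the flag tables
 * below are intended to be scanned like this to render a raw error/status
 * register value as a comma-separated list of names.  Buffer handling is
 * deliberately minimal; the driver's real formatting helper is assumed to
 * be more careful about truncation and unknown bits.
 */
static inline size_t example_flags_to_string(char *buf, size_t len, u64 reg,
					     const struct flag_table *table,
					     size_t nentries)
{
	size_t i, used = 0;

	for (i = 0; i < nentries && used < len; i++)
		if (reg & table[i].flag)
			used += scnprintf(buf + used, len - used, "%s%s",
					  used ? "," : "", table[i].str);
	return used;
}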
  107. /* Send Error Consequences */
  108. #define SEC_WRITE_DROPPED 0x1
  109. #define SEC_PACKET_DROPPED 0x2
  110. #define SEC_SC_HALTED 0x4 /* per-context only */
  111. #define SEC_SPC_FREEZE 0x8 /* per-HFI only */
  112. #define DEFAULT_KRCVQS 2
  113. #define MIN_KERNEL_KCTXTS 2
  114. #define FIRST_KERNEL_KCTXT 1
  115. /*
  116. * RSM instance allocation
  117. * 0 - Verbs
  118. * 1 - User Fecn Handling
  119. * 2 - Vnic
  120. */
  121. #define RSM_INS_VERBS 0
  122. #define RSM_INS_FECN 1
  123. #define RSM_INS_VNIC 2
  124. /* Bit offset into the GUID which carries HFI id information */
  125. #define GUID_HFI_INDEX_SHIFT 39
  126. /* extract the emulation revision */
  127. #define emulator_rev(dd) ((dd)->irev >> 8)
  128. /* parallel and serial emulation versions are 3 and 4 respectively */
  129. #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
  130. #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
  131. /* RSM fields for Verbs */
  132. /* packet type */
  133. #define IB_PACKET_TYPE 2ull
  134. #define QW_SHIFT 6ull
  135. /* QPN[7..1] */
  136. #define QPN_WIDTH 7ull
  137. /* LRH.BTH: QW 0, OFFSET 48 - for match */
  138. #define LRH_BTH_QW 0ull
  139. #define LRH_BTH_BIT_OFFSET 48ull
  140. #define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
  141. #define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
  142. #define LRH_BTH_SELECT
  143. #define LRH_BTH_MASK 3ull
  144. #define LRH_BTH_VALUE 2ull
  145. /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
  146. #define LRH_SC_QW 0ull
  147. #define LRH_SC_BIT_OFFSET 56ull
  148. #define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
  149. #define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
  150. #define LRH_SC_MASK 128ull
  151. #define LRH_SC_VALUE 0ull
  152. /* SC[n..0] QW 0, OFFSET 60 - for select */
  153. #define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
  154. /* QPN[m+n:1] QW 1, OFFSET 1 */
  155. #define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
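/*
 * Illustrative sketch, not part of the original driver: every RSM match or
 * select offset above is just a quad-word index packed over a bit offset
 * within that quad word, so LRH_BTH_MATCH_OFFSET is (0 << 6) | 48 and
 * QPN_SELECT_OFFSET is (1 << 6) | 1.
 */
static inline u64 example_rsm_offset(u64 qw, u64 bit_offset)
{
	return (qw << QW_SHIFT) | bit_offset;
}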
  156. /* RSM fields for Vnic */
  157. /* L2_TYPE: QW 0, OFFSET 61 - for match */
  158. #define L2_TYPE_QW 0ull
  159. #define L2_TYPE_BIT_OFFSET 61ull
  160. #define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
  161. #define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
  162. #define L2_TYPE_MASK 3ull
  163. #define L2_16B_VALUE 2ull
  164. /* L4_TYPE QW 1, OFFSET 0 - for match */
  165. #define L4_TYPE_QW 1ull
  166. #define L4_TYPE_BIT_OFFSET 0ull
  167. #define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
  168. #define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
  169. #define L4_16B_TYPE_MASK 0xFFull
  170. #define L4_16B_ETH_VALUE 0x78ull
  171. /* 16B VESWID - for select */
  172. #define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
  173. /* 16B ENTROPY - for select */
  174. #define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
  175. /* defines to build power on SC2VL table */
  176. #define SC2VL_VAL( \
  177. num, \
  178. sc0, sc0val, \
  179. sc1, sc1val, \
  180. sc2, sc2val, \
  181. sc3, sc3val, \
  182. sc4, sc4val, \
  183. sc5, sc5val, \
  184. sc6, sc6val, \
  185. sc7, sc7val) \
  186. ( \
  187. ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
  188. ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
  189. ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
  190. ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
  191. ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
  192. ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
  193. ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
  194. ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
  195. )
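/*
 * Illustrative sketch, not part of the original driver's code at this
 * point: SC2VL_VAL() assembles a complete SendSC2VLt register image from
 * (SC, VL) pairs.  A one-to-one mapping of SC0..SC7 onto VL0..VL7 could be
 * written roughly as below; write_csr() and SEND_SC2VLT0 are assumed to be
 * the driver's CSR write helper and register name, and the mapping shown
 * is an example rather than the actual power-on default.
 */
static inline void example_init_sc2vlt0(struct hfi1_devdata *dd)
{
	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
		0,
		0, 0, 1, 1, 2, 2, 3, 3,
		4, 4, 5, 5, 6, 6, 7, 7));
}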
  196. #define DC_SC_VL_VAL( \
  197. range, \
  198. e0, e0val, \
  199. e1, e1val, \
  200. e2, e2val, \
  201. e3, e3val, \
  202. e4, e4val, \
  203. e5, e5val, \
  204. e6, e6val, \
  205. e7, e7val, \
  206. e8, e8val, \
  207. e9, e9val, \
  208. e10, e10val, \
  209. e11, e11val, \
  210. e12, e12val, \
  211. e13, e13val, \
  212. e14, e14val, \
  213. e15, e15val) \
  214. ( \
  215. ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
  216. ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
  217. ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
  218. ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
  219. ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
  220. ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
  221. ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
  222. ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
  223. ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
  224. ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
  225. ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
  226. ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
  227. ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
  228. ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
  229. ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
  230. ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
  231. )
  232. /* all CceStatus sub-block freeze bits */
  233. #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
  234. | CCE_STATUS_RXE_FROZE_SMASK \
  235. | CCE_STATUS_TXE_FROZE_SMASK \
  236. | CCE_STATUS_TXE_PIO_FROZE_SMASK)
  237. /* all CceStatus sub-block TXE pause bits */
  238. #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
  239. | CCE_STATUS_TXE_PAUSED_SMASK \
  240. | CCE_STATUS_SDMA_PAUSED_SMASK)
  241. /* all CceStatus sub-block RXE pause bits */
  242. #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
  243. #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
  244. #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
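/*
 * Illustrative sketch, not part of the original driver: counters that are
 * only 32 bits wide in hardware wrap at CNTR_32BIT_MAX, so a 64-bit
 * software total is typically maintained by adding the forward delta seen
 * between consecutive reads, as below.
 */
static inline u64 example_accumulate_32bit_cntr(u64 *sw_total, u32 prev,
						u32 now)
{
	*sw_total += (u64)(now - prev);	/* u32 subtraction handles the wrap */
	return *sw_total;
}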
  245. /*
  246. * CCE Error flags.
  247. */
  248. static struct flag_table cce_err_status_flags[] = {
  249. /* 0*/ FLAG_ENTRY0("CceCsrParityErr",
  250. CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
  251. /* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
  252. CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
  253. /* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
  254. CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
  255. /* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
  256. CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
  257. /* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
  258. CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
  259. /* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
  260. CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
  261. /* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
  262. CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
  263. /* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
  264. CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
  265. /* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
  266. CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
  267. /* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
  268. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
  269. /*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
  270. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
  271. /*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
  272. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
  273. /*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
  274. CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
  275. /*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
  276. CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
  277. /*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
  278. CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
  279. /*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
  280. CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
  281. /*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
  282. CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
  283. /*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
  284. CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
  285. /*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
  286. CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
  287. /*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
  288. CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
  289. /*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
  290. CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
  291. /*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
  292. CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
  293. /*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
  294. CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
  295. /*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
  296. CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
  297. /*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
  298. CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
  299. /*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
  300. CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
  301. /*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
  302. CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
  303. /*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
  304. CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
  305. /*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
  306. CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
  307. /*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
  308. CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
  309. /*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
  310. CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
  311. /*31*/ FLAG_ENTRY0("LATriggered",
  312. CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
  313. /*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
  314. CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
  315. /*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
  316. CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
  317. /*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
  318. CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
  319. /*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
  320. CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
  321. /*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
  322. CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
  323. /*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
  324. CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
  325. /*38*/ FLAG_ENTRY0("CceIntMapCorErr",
  326. CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
  327. /*39*/ FLAG_ENTRY0("CceIntMapUncErr",
  328. CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
  329. /*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
  330. CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
  331. /*41-63 reserved*/
  332. };
  333. /*
  334. * Misc Error flags
  335. */
  336. #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
  337. static struct flag_table misc_err_status_flags[] = {
  338. /* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
  339. /* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
  340. /* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
  341. /* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
  342. /* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
  343. /* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
  344. /* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
  345. /* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
  346. /* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
  347. /* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
  348. /*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
  349. /*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
  350. /*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
  351. };
  352. /*
  353. * TXE PIO Error flags and consequences
  354. */
  355. static struct flag_table pio_err_status_flags[] = {
  356. /* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
  357. SEC_WRITE_DROPPED,
  358. SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
  359. /* 1*/ FLAG_ENTRY("PioWriteAddrParity",
  360. SEC_SPC_FREEZE,
  361. SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
  362. /* 2*/ FLAG_ENTRY("PioCsrParity",
  363. SEC_SPC_FREEZE,
  364. SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
  365. /* 3*/ FLAG_ENTRY("PioSbMemFifo0",
  366. SEC_SPC_FREEZE,
  367. SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
  368. /* 4*/ FLAG_ENTRY("PioSbMemFifo1",
  369. SEC_SPC_FREEZE,
  370. SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
  371. /* 5*/ FLAG_ENTRY("PioPccFifoParity",
  372. SEC_SPC_FREEZE,
  373. SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
  374. /* 6*/ FLAG_ENTRY("PioPecFifoParity",
  375. SEC_SPC_FREEZE,
  376. SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
  377. /* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
  378. SEC_SPC_FREEZE,
  379. SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
  380. /* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
  381. SEC_SPC_FREEZE,
  382. SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
  383. /* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
  384. SEC_SPC_FREEZE,
  385. SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
  386. /*10*/ FLAG_ENTRY("PioSmPktResetParity",
  387. SEC_SPC_FREEZE,
  388. SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
  389. /*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
  390. SEC_SPC_FREEZE,
  391. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
  392. /*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
  393. SEC_SPC_FREEZE,
  394. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
  395. /*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
  396. 0,
  397. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
  398. /*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
  399. 0,
  400. SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
  401. /*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
  402. SEC_SPC_FREEZE,
  403. SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
  404. /*16*/ FLAG_ENTRY("PioPpmcPblFifo",
  405. SEC_SPC_FREEZE,
  406. SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
  407. /*17*/ FLAG_ENTRY("PioInitSmIn",
  408. 0,
  409. SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
  410. /*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
  411. SEC_SPC_FREEZE,
  412. SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
  413. /*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
  414. SEC_SPC_FREEZE,
  415. SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
  416. /*20*/ FLAG_ENTRY("PioHostAddrMemCor",
  417. 0,
  418. SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
  419. /*21*/ FLAG_ENTRY("PioWriteDataParity",
  420. SEC_SPC_FREEZE,
  421. SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
  422. /*22*/ FLAG_ENTRY("PioStateMachine",
  423. SEC_SPC_FREEZE,
  424. SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
  425. /*23*/ FLAG_ENTRY("PioWriteQwValidParity",
  426. SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
  427. SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
  428. /*24*/ FLAG_ENTRY("PioBlockQwCountParity",
  429. SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
  430. SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
  431. /*25*/ FLAG_ENTRY("PioVlfVlLenParity",
  432. SEC_SPC_FREEZE,
  433. SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
  434. /*26*/ FLAG_ENTRY("PioVlfSopParity",
  435. SEC_SPC_FREEZE,
  436. SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
  437. /*27*/ FLAG_ENTRY("PioVlFifoParity",
  438. SEC_SPC_FREEZE,
  439. SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
  440. /*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
  441. SEC_SPC_FREEZE,
  442. SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
  443. /*29*/ FLAG_ENTRY("PioPpmcSopLen",
  444. SEC_SPC_FREEZE,
  445. SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
  446. /*30-31 reserved*/
  447. /*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
  448. SEC_SPC_FREEZE,
  449. SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
  450. /*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
  451. SEC_SPC_FREEZE,
  452. SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
  453. /*34*/ FLAG_ENTRY("PioPccSopHeadParity",
  454. SEC_SPC_FREEZE,
  455. SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
  456. /*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
  457. SEC_SPC_FREEZE,
  458. SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
  459. /*36-63 reserved*/
  460. };
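/*
 * Illustrative sketch, not part of the original driver: each entry above
 * pairs an error bit with its Send Error Consequences (the SEC_* values in
 * the "extra" field), so the combined consequences of a raw
 * SendPioErrStatus value can be collected like this.
 */
static inline u16 example_pio_err_consequences(u64 status)
{
	u16 sec = 0;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(pio_err_status_flags); i++)
		if (status & pio_err_status_flags[i].flag)
			sec |= pio_err_status_flags[i].extra;
	return sec;
}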
  461. /* TXE PIO errors that cause an SPC freeze */
  462. #define ALL_PIO_FREEZE_ERR \
  463. (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
  464. | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
  465. | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
  466. | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
  467. | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
  468. | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
  469. | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
  470. | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
  471. | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
  472. | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
  473. | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
  474. | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
  475. | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
  476. | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
  477. | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
  478. | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
  479. | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
  480. | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
  481. | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
  482. | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
  483. | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
  484. | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
  485. | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
  486. | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
  487. | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
  488. | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
  489. | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
  490. | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
  491. | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
  492. /*
  493. * TXE SDMA Error flags
  494. */
  495. static struct flag_table sdma_err_status_flags[] = {
  496. /* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
  497. SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
  498. /* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
  499. SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
  500. /* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
  501. SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
  502. /* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
  503. SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
  504. /*04-63 reserved*/
  505. };
  506. /* TXE SDMA errors that cause an SPC freeze */
  507. #define ALL_SDMA_FREEZE_ERR \
  508. (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
  509. | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
  510. | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
  511. /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
  512. #define PORT_DISCARD_EGRESS_ERRS \
  513. (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
  514. | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
  515. | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
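/*
 * Illustrative sketch, not part of the original driver: errors covered by
 * PORT_DISCARD_EGRESS_ERRS are the ones that feed the PortXmitDiscard
 * counter, so the number of discard-relevant causes present in a
 * SendEgressErrInfo value can be counted with the kernel's population
 * count helper.
 */
static inline unsigned int example_port_discard_causes(u64 err_info)
{
	return hweight64(err_info & PORT_DISCARD_EGRESS_ERRS);
}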
  516. /*
  517. * TXE Egress Error flags
  518. */
  519. #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
  520. static struct flag_table egress_err_status_flags[] = {
  521. /* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
  522. /* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
  523. /* 2 reserved */
  524. /* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
  525. SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
  526. /* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
  527. /* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
  528. /* 6 reserved */
  529. /* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
  530. SEES(TX_PIO_LAUNCH_INTF_PARITY)),
  531. /* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
  532. SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
  533. /* 9-10 reserved */
  534. /*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
  535. SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
  536. /*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
  537. /*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
  538. /*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
  539. /*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
  540. /*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
  541. SEES(TX_SDMA0_DISALLOWED_PACKET)),
  542. /*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
  543. SEES(TX_SDMA1_DISALLOWED_PACKET)),
  544. /*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
  545. SEES(TX_SDMA2_DISALLOWED_PACKET)),
  546. /*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
  547. SEES(TX_SDMA3_DISALLOWED_PACKET)),
  548. /*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
  549. SEES(TX_SDMA4_DISALLOWED_PACKET)),
  550. /*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
  551. SEES(TX_SDMA5_DISALLOWED_PACKET)),
  552. /*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
  553. SEES(TX_SDMA6_DISALLOWED_PACKET)),
  554. /*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
  555. SEES(TX_SDMA7_DISALLOWED_PACKET)),
  556. /*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
  557. SEES(TX_SDMA8_DISALLOWED_PACKET)),
  558. /*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
  559. SEES(TX_SDMA9_DISALLOWED_PACKET)),
  560. /*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
  561. SEES(TX_SDMA10_DISALLOWED_PACKET)),
  562. /*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
  563. SEES(TX_SDMA11_DISALLOWED_PACKET)),
  564. /*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
  565. SEES(TX_SDMA12_DISALLOWED_PACKET)),
  566. /*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
  567. SEES(TX_SDMA13_DISALLOWED_PACKET)),
  568. /*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
  569. SEES(TX_SDMA14_DISALLOWED_PACKET)),
  570. /*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
  571. SEES(TX_SDMA15_DISALLOWED_PACKET)),
  572. /*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
  573. SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
  574. /*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
  575. SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
  576. /*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
  577. SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
  578. /*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
  579. SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
  580. /*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
  581. SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
  582. /*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
  583. SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
  584. /*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
  585. SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
  586. /*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
  587. SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
  588. /*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
  589. SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
  590. /*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
  591. /*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
  592. /*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
  593. /*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
  594. /*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
  595. /*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
  596. /*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
  597. /*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
  598. /*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
  599. /*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
  600. /*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
  601. /*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
  602. /*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
  603. /*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
  604. /*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
  605. /*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
  606. /*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
  607. /*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
  608. /*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
  609. /*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
  610. /*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
  611. /*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
  612. SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
  613. /*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
  614. SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
  615. };
  616. /*
  617. * TXE Egress Error Info flags
  618. */
  619. #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
  620. static struct flag_table egress_err_info_flags[] = {
  621. /* 0*/ FLAG_ENTRY0("Reserved", 0ull),
  622. /* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
  623. /* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
  624. /* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
  625. /* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
  626. /* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
  627. /* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
  628. /* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
  629. /* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
  630. /* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
  631. /*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
  632. /*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
  633. /*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
  634. /*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
  635. /*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
  636. /*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
  637. /*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
  638. /*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
  639. /*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
  640. /*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
  641. /*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
  642. /*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
  643. };
  644. /* TXE Egress errors that cause an SPC freeze */
  645. #define ALL_TXE_EGRESS_FREEZE_ERR \
  646. (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
  647. | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
  648. | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
  649. | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
  650. | SEES(TX_LAUNCH_CSR_PARITY) \
  651. | SEES(TX_SBRD_CTL_CSR_PARITY) \
  652. | SEES(TX_CONFIG_PARITY) \
  653. | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
  654. | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
  655. | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
  656. | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
  657. | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
  658. | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
  659. | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
  660. | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
  661. | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
  662. | SEES(TX_CREDIT_RETURN_PARITY))
  663. /*
  664. * TXE Send error flags
  665. */
  666. #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
  667. static struct flag_table send_err_status_flags[] = {
  668. /* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
  669. /* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
  670. /* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
  671. };
  672. /*
  673. * TXE Send Context Error flags and consequences
  674. */
  675. static struct flag_table sc_err_status_flags[] = {
  676. /* 0*/ FLAG_ENTRY("InconsistentSop",
  677. SEC_PACKET_DROPPED | SEC_SC_HALTED,
  678. SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
  679. /* 1*/ FLAG_ENTRY("DisallowedPacket",
  680. SEC_PACKET_DROPPED | SEC_SC_HALTED,
  681. SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
  682. /* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
  683. SEC_WRITE_DROPPED | SEC_SC_HALTED,
  684. SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
  685. /* 3*/ FLAG_ENTRY("WriteOverflow",
  686. SEC_WRITE_DROPPED | SEC_SC_HALTED,
  687. SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
  688. /* 4*/ FLAG_ENTRY("WriteOutOfBounds",
  689. SEC_WRITE_DROPPED | SEC_SC_HALTED,
  690. SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
  691. /* 5-63 reserved*/
  692. };
  693. /*
  694. * RXE Receive Error flags
  695. */
  696. #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
  697. static struct flag_table rxe_err_status_flags[] = {
  698. /* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
  699. /* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
  700. /* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
  701. /* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
  702. /* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
  703. /* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
  704. /* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
  705. /* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
  706. /* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
  707. /* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
  708. /*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
  709. /*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
  710. /*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
  711. /*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
  712. /*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
  713. /*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
  714. /*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
  715. RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
  716. /*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
  717. /*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
  718. /*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
  719. RXES(RBUF_BLOCK_LIST_READ_UNC)),
  720. /*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
  721. RXES(RBUF_BLOCK_LIST_READ_COR)),
  722. /*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
  723. RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
  724. /*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
  725. RXES(RBUF_CSR_QENT_CNT_PARITY)),
  726. /*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
  727. RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
  728. /*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
  729. RXES(RBUF_CSR_QVLD_BIT_PARITY)),
  730. /*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
  731. /*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
  732. /*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
  733. RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
  734. /*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
  735. /*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
  736. /*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
  737. /*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
  738. /*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
  739. /*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
  740. /*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
  741. /*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
  742. RXES(RBUF_FL_INITDONE_PARITY)),
  743. /*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
  744. RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
  745. /*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
  746. /*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
  747. /*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
  748. /*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
  749. RXES(LOOKUP_DES_PART1_UNC_COR)),
  750. /*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
  751. RXES(LOOKUP_DES_PART2_PARITY)),
  752. /*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
  753. /*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
  754. /*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
  755. /*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
  756. /*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
  757. /*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
  758. /*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
  759. /*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
  760. /*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
  761. /*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
  762. /*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
  763. /*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
  764. /*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
  765. /*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
  766. /*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
  767. /*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
  768. /*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
  769. /*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
  770. /*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
  771. /*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
  772. /*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
  773. /*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
  774. };
  775. /* RXE errors that will trigger an SPC freeze */
  776. #define ALL_RXE_FREEZE_ERR \
  777. (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
  778. | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
  779. | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
  780. | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
  781. | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
  782. | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
  783. | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
  784. | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
  785. | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
  786. | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
  787. | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
  788. | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
  789. | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
  790. | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
  791. | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
  792. | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
  793. | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
  794. | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
  795. | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
  796. | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
  797. | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
  798. | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
  799. | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
  800. | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
  801. | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
  802. | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
  803. | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
  804. | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
  805. | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
  806. | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
  807. | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
  808. | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
  809. | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
  810. | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
  811. | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
  812. | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
  813. | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
  814. | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
  815. | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
  816. | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
  817. | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
  818. | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
  819. | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
  820. | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
  821. #define RXE_FREEZE_ABORT_MASK \
  822. (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
  823. RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
  824. RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
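/*
 * Illustrative sketch, not part of the original driver: the two masks above
 * split RXE errors into "freeze the SPC" and "freeze, but the RXE cannot
 * drain cleanly, so abort the unfreeze wait".  A handler would typically
 * combine them as below; start_freeze_handling() and FREEZE_ABORT are
 * assumed to be the driver's freeze entry point and its abort flag.
 */
static inline void example_rxe_freeze_check(struct hfi1_pportdata *ppd,
					    u64 rxe_err_status)
{
	if (rxe_err_status & ALL_RXE_FREEZE_ERR) {
		int flags = 0;

		if (rxe_err_status & RXE_FREEZE_ABORT_MASK)
			flags = FREEZE_ABORT;
		start_freeze_handling(ppd, flags);
	}
}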
  825. /*
  826. * DCC Error Flags
  827. */
  828. #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
  829. static struct flag_table dcc_err_flags[] = {
  830. FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
  831. FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
  832. FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
  833. FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
  834. FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
  835. FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
  836. FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
  837. FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
  838. FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
  839. FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
  840. FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
  841. FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
  842. FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
  843. FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
  844. FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
  845. FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
  846. FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
  847. FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
  848. FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
  849. FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
  850. FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
  851. FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
  852. FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
  853. FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
  854. FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
  855. FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
  856. FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
  857. FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
  858. FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
  859. FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
  860. FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
  861. FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
  862. FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
  863. FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
  864. FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
  865. FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
  866. FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
  867. FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
  868. FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
  869. FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
  870. FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
  871. FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
  872. FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
  873. FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
  874. FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
  875. FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
  876. };

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		    LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		    LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		    LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		    LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		    LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		    LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Message flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
				    u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);
static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
					 int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);

/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};
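
/*
 * Illustrative note, not taken from the original source: the expectation is
 * that the generic clear-down handling reads the 64-bit value at ->status,
 * writes that value back to ->clear to acknowledge the asserted bits, and
 * then dispatches to ->handler with the value read, using ->desc only for
 * logging.  The exact sequence is an assumption here.
 */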

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
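
/*
 * For illustration only (not part of the original file): with the helpers
 * above, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *	  handle_cce_err, "CceErr" }
 * while the DC_EE1/DC_EE2 variants cover the DC blocks whose clear/enable
 * CSRs use the _FLG_CLR/_FLG_EN and _CLR/_EN suffixes respectively.
 */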

/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
 * a power of 2.  Therefore, we need a constant.  Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
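
/*
 * Illustrative note, not from the original source: for the power-of-2 MTU
 * sizes the mtu_cap field is presumably computed arithmetically (e.g. from
 * ilog2() of the size), which is why only the 10240-byte MTU needs an
 * explicit constant.  The exact formula used elsewhere in this file is an
 * assumption here.
 */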

/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;
	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;
	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;
	/*
	 * flags
	 */
	u8 flags;
	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)
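
/*
 * For illustration only (not part of the original file): each counter CSR in
 * the RXE/TXE/CCE arrays is 8 bytes wide, so the macros above turn an array
 * index into a byte offset.  A hypothetical entry such as
 *	RXE32_DEV_CNTR_ELEM(RcvFoo, 3, CNTR_NORMAL)
 * would expand to
 *	CNTR_ELEM("RcvFoo", (3 * 8 + RCV_COUNTER_ARRAY32),
 *		  0, CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr)
 * i.e. the fourth slot of the 32-bit receive counter array, read through
 * dev_access_u32_csr.  "RcvFoo" and index 3 are made up for the example.
 */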

/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}

/**
 * read_csr - read CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no mapping
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}

/**
 * write_csr - write CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 * @value: value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}

/**
 * get_csr_addr - return the iomem address for offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}
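
/*
 * Illustrative use only (not from the original source): code that touches
 * the same CSR repeatedly can cache the mapped address instead of going
 * through read_csr()/write_csr() each time, e.g.
 *
 *	void __iomem *addr = get_csr_addr(dd, offset);	// offset: some CSR
 *	if (addr)
 *		writeq(readq(addr) | bit, addr);	// hypothetical RMW
 *
 * get_csr_addr() returns NULL when the device is not present, so the NULL
 * check is required.
 */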

static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
  1304. /* Dev Access */
  1305. static u64 dev_access_u32_csr(const struct cntr_entry *entry,
  1306. void *context, int vl, int mode, u64 data)
  1307. {
  1308. struct hfi1_devdata *dd = context;
  1309. u64 csr = entry->csr;
  1310. if (entry->flags & CNTR_SDMA) {
  1311. if (vl == CNTR_INVALID_VL)
  1312. return 0;
  1313. csr += 0x100 * vl;
  1314. } else {
  1315. if (vl != CNTR_INVALID_VL)
  1316. return 0;
  1317. }
  1318. return read_write_csr(dd, csr, mode, data);
  1319. }
  1320. static u64 access_sde_err_cnt(const struct cntr_entry *entry,
  1321. void *context, int idx, int mode, u64 data)
  1322. {
  1323. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1324. if (dd->per_sdma && idx < dd->num_sdma)
  1325. return dd->per_sdma[idx].err_cnt;
  1326. return 0;
  1327. }
  1328. static u64 access_sde_int_cnt(const struct cntr_entry *entry,
  1329. void *context, int idx, int mode, u64 data)
  1330. {
  1331. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1332. if (dd->per_sdma && idx < dd->num_sdma)
  1333. return dd->per_sdma[idx].sdma_int_cnt;
  1334. return 0;
  1335. }
  1336. static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
  1337. void *context, int idx, int mode, u64 data)
  1338. {
  1339. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1340. if (dd->per_sdma && idx < dd->num_sdma)
  1341. return dd->per_sdma[idx].idle_int_cnt;
  1342. return 0;
  1343. }
  1344. static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
  1345. void *context, int idx, int mode,
  1346. u64 data)
  1347. {
  1348. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1349. if (dd->per_sdma && idx < dd->num_sdma)
  1350. return dd->per_sdma[idx].progress_int_cnt;
  1351. return 0;
  1352. }
  1353. static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
  1354. int vl, int mode, u64 data)
  1355. {
  1356. struct hfi1_devdata *dd = context;
  1357. u64 val = 0;
  1358. u64 csr = entry->csr;
  1359. if (entry->flags & CNTR_VL) {
  1360. if (vl == CNTR_INVALID_VL)
  1361. return 0;
  1362. csr += 8 * vl;
  1363. } else {
  1364. if (vl != CNTR_INVALID_VL)
  1365. return 0;
  1366. }
  1367. val = read_write_csr(dd, csr, mode, data);
  1368. return val;
  1369. }
  1370. static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
  1371. int vl, int mode, u64 data)
  1372. {
  1373. struct hfi1_devdata *dd = context;
  1374. u32 csr = entry->csr;
  1375. int ret = 0;
  1376. if (vl != CNTR_INVALID_VL)
  1377. return 0;
  1378. if (mode == CNTR_MODE_R)
  1379. ret = read_lcb_csr(dd, csr, &data);
  1380. else if (mode == CNTR_MODE_W)
  1381. ret = write_lcb_csr(dd, csr, data);
  1382. if (ret) {
  1383. dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
  1384. return 0;
  1385. }
  1386. hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
  1387. return data;
  1388. }
  1389. /* Port Access */
  1390. static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
  1391. int vl, int mode, u64 data)
  1392. {
  1393. struct hfi1_pportdata *ppd = context;
  1394. if (vl != CNTR_INVALID_VL)
  1395. return 0;
  1396. return read_write_csr(ppd->dd, entry->csr, mode, data);
  1397. }
  1398. static u64 port_access_u64_csr(const struct cntr_entry *entry,
  1399. void *context, int vl, int mode, u64 data)
  1400. {
  1401. struct hfi1_pportdata *ppd = context;
  1402. u64 val;
  1403. u64 csr = entry->csr;
  1404. if (entry->flags & CNTR_VL) {
  1405. if (vl == CNTR_INVALID_VL)
  1406. return 0;
  1407. csr += 8 * vl;
  1408. } else {
  1409. if (vl != CNTR_INVALID_VL)
  1410. return 0;
  1411. }
  1412. val = read_write_csr(ppd->dd, csr, mode, data);
  1413. return val;
  1414. }
  1415. /* Software defined */
  1416. static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
  1417. u64 data)
  1418. {
  1419. u64 ret;
  1420. if (mode == CNTR_MODE_R) {
  1421. ret = *cntr;
  1422. } else if (mode == CNTR_MODE_W) {
  1423. *cntr = data;
  1424. ret = data;
  1425. } else {
  1426. dd_dev_err(dd, "Invalid cntr sw access mode");
  1427. return 0;
  1428. }
  1429. hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
  1430. return ret;
  1431. }
  1432. static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
  1433. int vl, int mode, u64 data)
  1434. {
  1435. struct hfi1_pportdata *ppd = context;
  1436. if (vl != CNTR_INVALID_VL)
  1437. return 0;
  1438. return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
  1439. }
  1440. static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
  1441. int vl, int mode, u64 data)
  1442. {
  1443. struct hfi1_pportdata *ppd = context;
  1444. if (vl != CNTR_INVALID_VL)
  1445. return 0;
  1446. return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
  1447. }
  1448. static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
  1449. void *context, int vl, int mode,
  1450. u64 data)
  1451. {
  1452. struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
  1453. if (vl != CNTR_INVALID_VL)
  1454. return 0;
  1455. return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
  1456. }
  1457. static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
  1458. void *context, int vl, int mode, u64 data)
  1459. {
  1460. struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
  1461. u64 zero = 0;
  1462. u64 *counter;
  1463. if (vl == CNTR_INVALID_VL)
  1464. counter = &ppd->port_xmit_discards;
  1465. else if (vl >= 0 && vl < C_VL_COUNT)
  1466. counter = &ppd->port_xmit_discards_vl[vl];
  1467. else
  1468. counter = &zero;
  1469. return read_write_sw(ppd->dd, counter, mode, data);
  1470. }
  1471. static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
  1472. void *context, int vl, int mode,
  1473. u64 data)
  1474. {
  1475. struct hfi1_pportdata *ppd = context;
  1476. if (vl != CNTR_INVALID_VL)
  1477. return 0;
  1478. return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
  1479. mode, data);
  1480. }
  1481. static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
  1482. void *context, int vl, int mode, u64 data)
  1483. {
  1484. struct hfi1_pportdata *ppd = context;
  1485. if (vl != CNTR_INVALID_VL)
  1486. return 0;
  1487. return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
  1488. mode, data);
  1489. }
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}

static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
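
/*
 * Worked example of the zeroing scheme above (illustrative numbers, not from
 * the source): if the per-CPU values currently sum to 1000 and user space
 * writes 0 to the counter, *z_val is set to 1000; a later read taken when
 * the per-CPU sum has grown to 1200 reports 1200 - 1000 = 200.  The per-CPU
 * values themselves are never written, which is why only a zeroing write is
 * accepted.
 */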
  1519. static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
  1520. void *context, int vl, int mode, u64 data)
  1521. {
  1522. struct hfi1_devdata *dd = context;
  1523. return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
  1524. mode, data);
  1525. }
  1526. static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
  1527. void *context, int vl, int mode, u64 data)
  1528. {
  1529. struct hfi1_devdata *dd = context;
  1530. return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
  1531. mode, data);
  1532. }
  1533. static u64 access_sw_pio_wait(const struct cntr_entry *entry,
  1534. void *context, int vl, int mode, u64 data)
  1535. {
  1536. struct hfi1_devdata *dd = context;
  1537. return dd->verbs_dev.n_piowait;
  1538. }
  1539. static u64 access_sw_pio_drain(const struct cntr_entry *entry,
  1540. void *context, int vl, int mode, u64 data)
  1541. {
  1542. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1543. return dd->verbs_dev.n_piodrain;
  1544. }
  1545. static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
  1546. void *context, int vl, int mode, u64 data)
  1547. {
  1548. struct hfi1_devdata *dd = context;
  1549. return dd->ctx0_seq_drop;
  1550. }
  1551. static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
  1552. void *context, int vl, int mode, u64 data)
  1553. {
  1554. struct hfi1_devdata *dd = context;
  1555. return dd->verbs_dev.n_txwait;
  1556. }
  1557. static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
  1558. void *context, int vl, int mode, u64 data)
  1559. {
  1560. struct hfi1_devdata *dd = context;
  1561. return dd->verbs_dev.n_kmem_wait;
  1562. }
  1563. static u64 access_sw_send_schedule(const struct cntr_entry *entry,
  1564. void *context, int vl, int mode, u64 data)
  1565. {
  1566. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1567. return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
  1568. mode, data);
  1569. }
/* Software counters for the error status bits within MISC_ERR_STATUS */
  1571. static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
  1572. void *context, int vl, int mode,
  1573. u64 data)
  1574. {
  1575. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1576. return dd->misc_err_status_cnt[12];
  1577. }
  1578. static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
  1579. void *context, int vl, int mode,
  1580. u64 data)
  1581. {
  1582. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1583. return dd->misc_err_status_cnt[11];
  1584. }
  1585. static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
  1586. void *context, int vl, int mode,
  1587. u64 data)
  1588. {
  1589. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1590. return dd->misc_err_status_cnt[10];
  1591. }
  1592. static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
  1593. void *context, int vl,
  1594. int mode, u64 data)
  1595. {
  1596. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1597. return dd->misc_err_status_cnt[9];
  1598. }
  1599. static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
  1600. void *context, int vl, int mode,
  1601. u64 data)
  1602. {
  1603. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1604. return dd->misc_err_status_cnt[8];
  1605. }
  1606. static u64 access_misc_efuse_read_bad_addr_err_cnt(
  1607. const struct cntr_entry *entry,
  1608. void *context, int vl, int mode, u64 data)
  1609. {
  1610. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1611. return dd->misc_err_status_cnt[7];
  1612. }
  1613. static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
  1614. void *context, int vl,
  1615. int mode, u64 data)
  1616. {
  1617. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1618. return dd->misc_err_status_cnt[6];
  1619. }
  1620. static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
  1621. void *context, int vl, int mode,
  1622. u64 data)
  1623. {
  1624. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1625. return dd->misc_err_status_cnt[5];
  1626. }
  1627. static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
  1628. void *context, int vl, int mode,
  1629. u64 data)
  1630. {
  1631. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1632. return dd->misc_err_status_cnt[4];
  1633. }
  1634. static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
  1635. void *context, int vl,
  1636. int mode, u64 data)
  1637. {
  1638. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1639. return dd->misc_err_status_cnt[3];
  1640. }
  1641. static u64 access_misc_csr_write_bad_addr_err_cnt(
  1642. const struct cntr_entry *entry,
  1643. void *context, int vl, int mode, u64 data)
  1644. {
  1645. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1646. return dd->misc_err_status_cnt[2];
  1647. }
  1648. static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1649. void *context, int vl,
  1650. int mode, u64 data)
  1651. {
  1652. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1653. return dd->misc_err_status_cnt[1];
  1654. }
  1655. static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
  1656. void *context, int vl, int mode,
  1657. u64 data)
  1658. {
  1659. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1660. return dd->misc_err_status_cnt[0];
  1661. }
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
  1666. static u64 access_sw_cce_err_status_aggregated_cnt(
  1667. const struct cntr_entry *entry,
  1668. void *context, int vl, int mode, u64 data)
  1669. {
  1670. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1671. return dd->sw_cce_err_status_aggregate;
  1672. }
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
  1677. static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
  1678. void *context, int vl, int mode,
  1679. u64 data)
  1680. {
  1681. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1682. return dd->cce_err_status_cnt[40];
  1683. }
  1684. static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
  1685. void *context, int vl, int mode,
  1686. u64 data)
  1687. {
  1688. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1689. return dd->cce_err_status_cnt[39];
  1690. }
  1691. static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
  1692. void *context, int vl, int mode,
  1693. u64 data)
  1694. {
  1695. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1696. return dd->cce_err_status_cnt[38];
  1697. }
  1698. static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
  1699. void *context, int vl, int mode,
  1700. u64 data)
  1701. {
  1702. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1703. return dd->cce_err_status_cnt[37];
  1704. }
  1705. static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
  1706. void *context, int vl, int mode,
  1707. u64 data)
  1708. {
  1709. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1710. return dd->cce_err_status_cnt[36];
  1711. }
  1712. static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
  1713. const struct cntr_entry *entry,
  1714. void *context, int vl, int mode, u64 data)
  1715. {
  1716. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1717. return dd->cce_err_status_cnt[35];
  1718. }
  1719. static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
  1720. const struct cntr_entry *entry,
  1721. void *context, int vl, int mode, u64 data)
  1722. {
  1723. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1724. return dd->cce_err_status_cnt[34];
  1725. }
  1726. static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
  1727. void *context, int vl,
  1728. int mode, u64 data)
  1729. {
  1730. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1731. return dd->cce_err_status_cnt[33];
  1732. }
  1733. static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1734. void *context, int vl, int mode,
  1735. u64 data)
  1736. {
  1737. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1738. return dd->cce_err_status_cnt[32];
  1739. }
  1740. static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
  1741. void *context, int vl, int mode, u64 data)
  1742. {
  1743. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1744. return dd->cce_err_status_cnt[31];
  1745. }
  1746. static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
  1747. void *context, int vl, int mode,
  1748. u64 data)
  1749. {
  1750. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1751. return dd->cce_err_status_cnt[30];
  1752. }
  1753. static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
  1754. void *context, int vl, int mode,
  1755. u64 data)
  1756. {
  1757. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1758. return dd->cce_err_status_cnt[29];
  1759. }
  1760. static u64 access_pcic_transmit_back_parity_err_cnt(
  1761. const struct cntr_entry *entry,
  1762. void *context, int vl, int mode, u64 data)
  1763. {
  1764. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1765. return dd->cce_err_status_cnt[28];
  1766. }
  1767. static u64 access_pcic_transmit_front_parity_err_cnt(
  1768. const struct cntr_entry *entry,
  1769. void *context, int vl, int mode, u64 data)
  1770. {
  1771. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1772. return dd->cce_err_status_cnt[27];
  1773. }
  1774. static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
  1775. void *context, int vl, int mode,
  1776. u64 data)
  1777. {
  1778. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1779. return dd->cce_err_status_cnt[26];
  1780. }
  1781. static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
  1782. void *context, int vl, int mode,
  1783. u64 data)
  1784. {
  1785. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1786. return dd->cce_err_status_cnt[25];
  1787. }
  1788. static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
  1789. void *context, int vl, int mode,
  1790. u64 data)
  1791. {
  1792. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1793. return dd->cce_err_status_cnt[24];
  1794. }
  1795. static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
  1796. void *context, int vl, int mode,
  1797. u64 data)
  1798. {
  1799. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1800. return dd->cce_err_status_cnt[23];
  1801. }
  1802. static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
  1803. void *context, int vl,
  1804. int mode, u64 data)
  1805. {
  1806. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1807. return dd->cce_err_status_cnt[22];
  1808. }
  1809. static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
  1810. void *context, int vl, int mode,
  1811. u64 data)
  1812. {
  1813. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1814. return dd->cce_err_status_cnt[21];
  1815. }
  1816. static u64 access_pcic_n_post_dat_q_parity_err_cnt(
  1817. const struct cntr_entry *entry,
  1818. void *context, int vl, int mode, u64 data)
  1819. {
  1820. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1821. return dd->cce_err_status_cnt[20];
  1822. }
  1823. static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
  1824. void *context, int vl,
  1825. int mode, u64 data)
  1826. {
  1827. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1828. return dd->cce_err_status_cnt[19];
  1829. }
  1830. static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
  1831. void *context, int vl, int mode,
  1832. u64 data)
  1833. {
  1834. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1835. return dd->cce_err_status_cnt[18];
  1836. }
  1837. static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
  1838. void *context, int vl, int mode,
  1839. u64 data)
  1840. {
  1841. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1842. return dd->cce_err_status_cnt[17];
  1843. }
  1844. static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
  1845. void *context, int vl, int mode,
  1846. u64 data)
  1847. {
  1848. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1849. return dd->cce_err_status_cnt[16];
  1850. }
  1851. static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
  1852. void *context, int vl, int mode,
  1853. u64 data)
  1854. {
  1855. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1856. return dd->cce_err_status_cnt[15];
  1857. }
  1858. static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
  1859. void *context, int vl,
  1860. int mode, u64 data)
  1861. {
  1862. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1863. return dd->cce_err_status_cnt[14];
  1864. }
  1865. static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
  1866. void *context, int vl, int mode,
  1867. u64 data)
  1868. {
  1869. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1870. return dd->cce_err_status_cnt[13];
  1871. }
  1872. static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
  1873. const struct cntr_entry *entry,
  1874. void *context, int vl, int mode, u64 data)
  1875. {
  1876. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1877. return dd->cce_err_status_cnt[12];
  1878. }
  1879. static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
  1880. const struct cntr_entry *entry,
  1881. void *context, int vl, int mode, u64 data)
  1882. {
  1883. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1884. return dd->cce_err_status_cnt[11];
  1885. }
  1886. static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
  1887. const struct cntr_entry *entry,
  1888. void *context, int vl, int mode, u64 data)
  1889. {
  1890. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1891. return dd->cce_err_status_cnt[10];
  1892. }
  1893. static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
  1894. const struct cntr_entry *entry,
  1895. void *context, int vl, int mode, u64 data)
  1896. {
  1897. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1898. return dd->cce_err_status_cnt[9];
  1899. }
  1900. static u64 access_cce_cli2_async_fifo_parity_err_cnt(
  1901. const struct cntr_entry *entry,
  1902. void *context, int vl, int mode, u64 data)
  1903. {
  1904. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1905. return dd->cce_err_status_cnt[8];
  1906. }
  1907. static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
  1908. void *context, int vl,
  1909. int mode, u64 data)
  1910. {
  1911. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1912. return dd->cce_err_status_cnt[7];
  1913. }
  1914. static u64 access_cce_cli0_async_fifo_parity_err_cnt(
  1915. const struct cntr_entry *entry,
  1916. void *context, int vl, int mode, u64 data)
  1917. {
  1918. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1919. return dd->cce_err_status_cnt[6];
  1920. }
  1921. static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
  1922. void *context, int vl, int mode,
  1923. u64 data)
  1924. {
  1925. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1926. return dd->cce_err_status_cnt[5];
  1927. }
  1928. static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
  1929. void *context, int vl, int mode,
  1930. u64 data)
  1931. {
  1932. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1933. return dd->cce_err_status_cnt[4];
  1934. }
  1935. static u64 access_cce_trgt_async_fifo_parity_err_cnt(
  1936. const struct cntr_entry *entry,
  1937. void *context, int vl, int mode, u64 data)
  1938. {
  1939. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1940. return dd->cce_err_status_cnt[3];
  1941. }
  1942. static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
  1943. void *context, int vl,
  1944. int mode, u64 data)
  1945. {
  1946. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1947. return dd->cce_err_status_cnt[2];
  1948. }
  1949. static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1950. void *context, int vl,
  1951. int mode, u64 data)
  1952. {
  1953. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1954. return dd->cce_err_status_cnt[1];
  1955. }
  1956. static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
  1957. void *context, int vl, int mode,
  1958. u64 data)
  1959. {
  1960. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1961. return dd->cce_err_status_cnt[0];
  1962. }
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
  1967. static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
  1968. void *context, int vl, int mode,
  1969. u64 data)
  1970. {
  1971. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1972. return dd->rcv_err_status_cnt[63];
  1973. }
  1974. static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
  1975. void *context, int vl,
  1976. int mode, u64 data)
  1977. {
  1978. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1979. return dd->rcv_err_status_cnt[62];
  1980. }
  1981. static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  1982. void *context, int vl, int mode,
  1983. u64 data)
  1984. {
  1985. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1986. return dd->rcv_err_status_cnt[61];
  1987. }
  1988. static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
  1989. void *context, int vl, int mode,
  1990. u64 data)
  1991. {
  1992. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  1993. return dd->rcv_err_status_cnt[60];
  1994. }
  1995. static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
  1996. void *context, int vl,
  1997. int mode, u64 data)
  1998. {
  1999. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2000. return dd->rcv_err_status_cnt[59];
  2001. }
  2002. static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
  2003. void *context, int vl,
  2004. int mode, u64 data)
  2005. {
  2006. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2007. return dd->rcv_err_status_cnt[58];
  2008. }
  2009. static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
  2010. void *context, int vl, int mode,
  2011. u64 data)
  2012. {
  2013. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2014. return dd->rcv_err_status_cnt[57];
  2015. }
  2016. static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
  2017. void *context, int vl, int mode,
  2018. u64 data)
  2019. {
  2020. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2021. return dd->rcv_err_status_cnt[56];
  2022. }
  2023. static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
  2024. void *context, int vl, int mode,
  2025. u64 data)
  2026. {
  2027. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2028. return dd->rcv_err_status_cnt[55];
  2029. }
  2030. static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
  2031. const struct cntr_entry *entry,
  2032. void *context, int vl, int mode, u64 data)
  2033. {
  2034. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2035. return dd->rcv_err_status_cnt[54];
  2036. }
  2037. static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
  2038. const struct cntr_entry *entry,
  2039. void *context, int vl, int mode, u64 data)
  2040. {
  2041. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2042. return dd->rcv_err_status_cnt[53];
  2043. }
  2044. static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
  2045. void *context, int vl,
  2046. int mode, u64 data)
  2047. {
  2048. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2049. return dd->rcv_err_status_cnt[52];
  2050. }
  2051. static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
  2052. void *context, int vl,
  2053. int mode, u64 data)
  2054. {
  2055. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2056. return dd->rcv_err_status_cnt[51];
  2057. }
  2058. static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
  2059. void *context, int vl,
  2060. int mode, u64 data)
  2061. {
  2062. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2063. return dd->rcv_err_status_cnt[50];
  2064. }
  2065. static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
  2066. void *context, int vl,
  2067. int mode, u64 data)
  2068. {
  2069. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2070. return dd->rcv_err_status_cnt[49];
  2071. }
  2072. static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
  2073. void *context, int vl,
  2074. int mode, u64 data)
  2075. {
  2076. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2077. return dd->rcv_err_status_cnt[48];
  2078. }
  2079. static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
  2080. void *context, int vl,
  2081. int mode, u64 data)
  2082. {
  2083. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2084. return dd->rcv_err_status_cnt[47];
  2085. }
  2086. static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
  2087. void *context, int vl, int mode,
  2088. u64 data)
  2089. {
  2090. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2091. return dd->rcv_err_status_cnt[46];
  2092. }
  2093. static u64 access_rx_hq_intr_csr_parity_err_cnt(
  2094. const struct cntr_entry *entry,
  2095. void *context, int vl, int mode, u64 data)
  2096. {
  2097. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2098. return dd->rcv_err_status_cnt[45];
  2099. }
  2100. static u64 access_rx_lookup_csr_parity_err_cnt(
  2101. const struct cntr_entry *entry,
  2102. void *context, int vl, int mode, u64 data)
  2103. {
  2104. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2105. return dd->rcv_err_status_cnt[44];
  2106. }
  2107. static u64 access_rx_lookup_rcv_array_cor_err_cnt(
  2108. const struct cntr_entry *entry,
  2109. void *context, int vl, int mode, u64 data)
  2110. {
  2111. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2112. return dd->rcv_err_status_cnt[43];
  2113. }
  2114. static u64 access_rx_lookup_rcv_array_unc_err_cnt(
  2115. const struct cntr_entry *entry,
  2116. void *context, int vl, int mode, u64 data)
  2117. {
  2118. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2119. return dd->rcv_err_status_cnt[42];
  2120. }
  2121. static u64 access_rx_lookup_des_part2_parity_err_cnt(
  2122. const struct cntr_entry *entry,
  2123. void *context, int vl, int mode, u64 data)
  2124. {
  2125. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2126. return dd->rcv_err_status_cnt[41];
  2127. }
  2128. static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
  2129. const struct cntr_entry *entry,
  2130. void *context, int vl, int mode, u64 data)
  2131. {
  2132. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2133. return dd->rcv_err_status_cnt[40];
  2134. }
  2135. static u64 access_rx_lookup_des_part1_unc_err_cnt(
  2136. const struct cntr_entry *entry,
  2137. void *context, int vl, int mode, u64 data)
  2138. {
  2139. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2140. return dd->rcv_err_status_cnt[39];
  2141. }
  2142. static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
  2143. const struct cntr_entry *entry,
  2144. void *context, int vl, int mode, u64 data)
  2145. {
  2146. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2147. return dd->rcv_err_status_cnt[38];
  2148. }
  2149. static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
  2150. const struct cntr_entry *entry,
  2151. void *context, int vl, int mode, u64 data)
  2152. {
  2153. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2154. return dd->rcv_err_status_cnt[37];
  2155. }
  2156. static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
  2157. const struct cntr_entry *entry,
  2158. void *context, int vl, int mode, u64 data)
  2159. {
  2160. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2161. return dd->rcv_err_status_cnt[36];
  2162. }
  2163. static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
  2164. const struct cntr_entry *entry,
  2165. void *context, int vl, int mode, u64 data)
  2166. {
  2167. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2168. return dd->rcv_err_status_cnt[35];
  2169. }
  2170. static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
  2171. const struct cntr_entry *entry,
  2172. void *context, int vl, int mode, u64 data)
  2173. {
  2174. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2175. return dd->rcv_err_status_cnt[34];
  2176. }
  2177. static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
  2178. const struct cntr_entry *entry,
  2179. void *context, int vl, int mode, u64 data)
  2180. {
  2181. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2182. return dd->rcv_err_status_cnt[33];
  2183. }
  2184. static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
  2185. void *context, int vl, int mode,
  2186. u64 data)
  2187. {
  2188. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2189. return dd->rcv_err_status_cnt[32];
  2190. }
  2191. static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
  2192. void *context, int vl, int mode,
  2193. u64 data)
  2194. {
  2195. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2196. return dd->rcv_err_status_cnt[31];
  2197. }
  2198. static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
  2199. void *context, int vl, int mode,
  2200. u64 data)
  2201. {
  2202. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2203. return dd->rcv_err_status_cnt[30];
  2204. }
  2205. static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
  2206. void *context, int vl, int mode,
  2207. u64 data)
  2208. {
  2209. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2210. return dd->rcv_err_status_cnt[29];
  2211. }
  2212. static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
  2213. void *context, int vl,
  2214. int mode, u64 data)
  2215. {
  2216. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2217. return dd->rcv_err_status_cnt[28];
  2218. }
  2219. static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
  2220. const struct cntr_entry *entry,
  2221. void *context, int vl, int mode, u64 data)
  2222. {
  2223. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2224. return dd->rcv_err_status_cnt[27];
  2225. }
  2226. static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
  2227. const struct cntr_entry *entry,
  2228. void *context, int vl, int mode, u64 data)
  2229. {
  2230. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2231. return dd->rcv_err_status_cnt[26];
  2232. }
  2233. static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
  2234. const struct cntr_entry *entry,
  2235. void *context, int vl, int mode, u64 data)
  2236. {
  2237. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2238. return dd->rcv_err_status_cnt[25];
  2239. }
  2240. static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
  2241. const struct cntr_entry *entry,
  2242. void *context, int vl, int mode, u64 data)
  2243. {
  2244. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2245. return dd->rcv_err_status_cnt[24];
  2246. }
  2247. static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
  2248. const struct cntr_entry *entry,
  2249. void *context, int vl, int mode, u64 data)
  2250. {
  2251. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2252. return dd->rcv_err_status_cnt[23];
  2253. }
  2254. static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
  2255. const struct cntr_entry *entry,
  2256. void *context, int vl, int mode, u64 data)
  2257. {
  2258. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2259. return dd->rcv_err_status_cnt[22];
  2260. }
  2261. static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
  2262. const struct cntr_entry *entry,
  2263. void *context, int vl, int mode, u64 data)
  2264. {
  2265. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2266. return dd->rcv_err_status_cnt[21];
  2267. }
  2268. static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
  2269. const struct cntr_entry *entry,
  2270. void *context, int vl, int mode, u64 data)
  2271. {
  2272. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2273. return dd->rcv_err_status_cnt[20];
  2274. }
  2275. static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
  2276. const struct cntr_entry *entry,
  2277. void *context, int vl, int mode, u64 data)
  2278. {
  2279. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2280. return dd->rcv_err_status_cnt[19];
  2281. }
  2282. static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
  2283. void *context, int vl,
  2284. int mode, u64 data)
  2285. {
  2286. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2287. return dd->rcv_err_status_cnt[18];
  2288. }
  2289. static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
  2290. void *context, int vl,
  2291. int mode, u64 data)
  2292. {
  2293. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2294. return dd->rcv_err_status_cnt[17];
  2295. }
  2296. static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
  2297. const struct cntr_entry *entry,
  2298. void *context, int vl, int mode, u64 data)
  2299. {
  2300. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2301. return dd->rcv_err_status_cnt[16];
  2302. }
  2303. static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
  2304. const struct cntr_entry *entry,
  2305. void *context, int vl, int mode, u64 data)
  2306. {
  2307. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2308. return dd->rcv_err_status_cnt[15];
  2309. }
  2310. static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
  2311. void *context, int vl,
  2312. int mode, u64 data)
  2313. {
  2314. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2315. return dd->rcv_err_status_cnt[14];
  2316. }
  2317. static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
  2318. void *context, int vl,
  2319. int mode, u64 data)
  2320. {
  2321. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2322. return dd->rcv_err_status_cnt[13];
  2323. }
  2324. static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
  2325. void *context, int vl, int mode,
  2326. u64 data)
  2327. {
  2328. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2329. return dd->rcv_err_status_cnt[12];
  2330. }
  2331. static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
  2332. void *context, int vl, int mode,
  2333. u64 data)
  2334. {
  2335. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2336. return dd->rcv_err_status_cnt[11];
  2337. }
  2338. static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
  2339. void *context, int vl, int mode,
  2340. u64 data)
  2341. {
  2342. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2343. return dd->rcv_err_status_cnt[10];
  2344. }
  2345. static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
  2346. void *context, int vl, int mode,
  2347. u64 data)
  2348. {
  2349. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2350. return dd->rcv_err_status_cnt[9];
  2351. }
  2352. static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
  2353. void *context, int vl, int mode,
  2354. u64 data)
  2355. {
  2356. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2357. return dd->rcv_err_status_cnt[8];
  2358. }
  2359. static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
  2360. const struct cntr_entry *entry,
  2361. void *context, int vl, int mode, u64 data)
  2362. {
  2363. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2364. return dd->rcv_err_status_cnt[7];
  2365. }
  2366. static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
  2367. const struct cntr_entry *entry,
  2368. void *context, int vl, int mode, u64 data)
  2369. {
  2370. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2371. return dd->rcv_err_status_cnt[6];
  2372. }
  2373. static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
  2374. void *context, int vl, int mode,
  2375. u64 data)
  2376. {
  2377. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2378. return dd->rcv_err_status_cnt[5];
  2379. }
  2380. static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
  2381. void *context, int vl, int mode,
  2382. u64 data)
  2383. {
  2384. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2385. return dd->rcv_err_status_cnt[4];
  2386. }
  2387. static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
  2388. void *context, int vl, int mode,
  2389. u64 data)
  2390. {
  2391. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2392. return dd->rcv_err_status_cnt[3];
  2393. }
  2394. static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
  2395. void *context, int vl, int mode,
  2396. u64 data)
  2397. {
  2398. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2399. return dd->rcv_err_status_cnt[2];
  2400. }
  2401. static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
  2402. void *context, int vl, int mode,
  2403. u64 data)
  2404. {
  2405. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2406. return dd->rcv_err_status_cnt[1];
  2407. }
  2408. static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
  2409. void *context, int vl, int mode,
  2410. u64 data)
  2411. {
  2412. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2413. return dd->rcv_err_status_cnt[0];
  2414. }
  2415. /*
  2416. * Software counters corresponding to each of the
  2417. * error status bits within SendPioErrStatus
  2418. */
  2419. static u64 access_pio_pec_sop_head_parity_err_cnt(
  2420. const struct cntr_entry *entry,
  2421. void *context, int vl, int mode, u64 data)
  2422. {
  2423. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2424. return dd->send_pio_err_status_cnt[35];
  2425. }
  2426. static u64 access_pio_pcc_sop_head_parity_err_cnt(
  2427. const struct cntr_entry *entry,
  2428. void *context, int vl, int mode, u64 data)
  2429. {
  2430. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2431. return dd->send_pio_err_status_cnt[34];
  2432. }
  2433. static u64 access_pio_last_returned_cnt_parity_err_cnt(
  2434. const struct cntr_entry *entry,
  2435. void *context, int vl, int mode, u64 data)
  2436. {
  2437. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2438. return dd->send_pio_err_status_cnt[33];
  2439. }
  2440. static u64 access_pio_current_free_cnt_parity_err_cnt(
  2441. const struct cntr_entry *entry,
  2442. void *context, int vl, int mode, u64 data)
  2443. {
  2444. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2445. return dd->send_pio_err_status_cnt[32];
  2446. }
  2447. static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
  2448. void *context, int vl, int mode,
  2449. u64 data)
  2450. {
  2451. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2452. return dd->send_pio_err_status_cnt[31];
  2453. }
  2454. static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
  2455. void *context, int vl, int mode,
  2456. u64 data)
  2457. {
  2458. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2459. return dd->send_pio_err_status_cnt[30];
  2460. }
  2461. static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
  2462. void *context, int vl, int mode,
  2463. u64 data)
  2464. {
  2465. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2466. return dd->send_pio_err_status_cnt[29];
  2467. }
  2468. static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
  2469. const struct cntr_entry *entry,
  2470. void *context, int vl, int mode, u64 data)
  2471. {
  2472. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2473. return dd->send_pio_err_status_cnt[28];
  2474. }
  2475. static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
  2476. void *context, int vl, int mode,
  2477. u64 data)
  2478. {
  2479. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2480. return dd->send_pio_err_status_cnt[27];
  2481. }
  2482. static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
  2483. void *context, int vl, int mode,
  2484. u64 data)
  2485. {
  2486. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2487. return dd->send_pio_err_status_cnt[26];
  2488. }
  2489. static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
  2490. void *context, int vl,
  2491. int mode, u64 data)
  2492. {
  2493. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2494. return dd->send_pio_err_status_cnt[25];
  2495. }
  2496. static u64 access_pio_block_qw_count_parity_err_cnt(
  2497. const struct cntr_entry *entry,
  2498. void *context, int vl, int mode, u64 data)
  2499. {
  2500. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2501. return dd->send_pio_err_status_cnt[24];
  2502. }
  2503. static u64 access_pio_write_qw_valid_parity_err_cnt(
  2504. const struct cntr_entry *entry,
  2505. void *context, int vl, int mode, u64 data)
  2506. {
  2507. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2508. return dd->send_pio_err_status_cnt[23];
  2509. }
  2510. static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
  2511. void *context, int vl, int mode,
  2512. u64 data)
  2513. {
  2514. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2515. return dd->send_pio_err_status_cnt[22];
  2516. }
  2517. static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
  2518. void *context, int vl,
  2519. int mode, u64 data)
  2520. {
  2521. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2522. return dd->send_pio_err_status_cnt[21];
  2523. }
  2524. static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
  2525. void *context, int vl,
  2526. int mode, u64 data)
  2527. {
  2528. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2529. return dd->send_pio_err_status_cnt[20];
  2530. }
  2531. static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
  2532. void *context, int vl,
  2533. int mode, u64 data)
  2534. {
  2535. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2536. return dd->send_pio_err_status_cnt[19];
  2537. }
  2538. static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
  2539. const struct cntr_entry *entry,
  2540. void *context, int vl, int mode, u64 data)
  2541. {
  2542. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2543. return dd->send_pio_err_status_cnt[18];
  2544. }
  2545. static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
  2546. void *context, int vl, int mode,
  2547. u64 data)
  2548. {
  2549. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2550. return dd->send_pio_err_status_cnt[17];
  2551. }
  2552. static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
  2553. void *context, int vl, int mode,
  2554. u64 data)
  2555. {
  2556. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2557. return dd->send_pio_err_status_cnt[16];
  2558. }
  2559. static u64 access_pio_credit_ret_fifo_parity_err_cnt(
  2560. const struct cntr_entry *entry,
  2561. void *context, int vl, int mode, u64 data)
  2562. {
  2563. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2564. return dd->send_pio_err_status_cnt[15];
  2565. }
  2566. static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
  2567. const struct cntr_entry *entry,
  2568. void *context, int vl, int mode, u64 data)
  2569. {
  2570. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2571. return dd->send_pio_err_status_cnt[14];
  2572. }
  2573. static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
  2574. const struct cntr_entry *entry,
  2575. void *context, int vl, int mode, u64 data)
  2576. {
  2577. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2578. return dd->send_pio_err_status_cnt[13];
  2579. }
  2580. static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
  2581. const struct cntr_entry *entry,
  2582. void *context, int vl, int mode, u64 data)
  2583. {
  2584. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2585. return dd->send_pio_err_status_cnt[12];
  2586. }
  2587. static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
  2588. const struct cntr_entry *entry,
  2589. void *context, int vl, int mode, u64 data)
  2590. {
  2591. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2592. return dd->send_pio_err_status_cnt[11];
  2593. }
  2594. static u64 access_pio_sm_pkt_reset_parity_err_cnt(
  2595. const struct cntr_entry *entry,
  2596. void *context, int vl, int mode, u64 data)
  2597. {
  2598. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2599. return dd->send_pio_err_status_cnt[10];
  2600. }
  2601. static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
  2602. const struct cntr_entry *entry,
  2603. void *context, int vl, int mode, u64 data)
  2604. {
  2605. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2606. return dd->send_pio_err_status_cnt[9];
  2607. }
  2608. static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
  2609. const struct cntr_entry *entry,
  2610. void *context, int vl, int mode, u64 data)
  2611. {
  2612. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2613. return dd->send_pio_err_status_cnt[8];
  2614. }
  2615. static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
  2616. const struct cntr_entry *entry,
  2617. void *context, int vl, int mode, u64 data)
  2618. {
  2619. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2620. return dd->send_pio_err_status_cnt[7];
  2621. }
  2622. static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
  2623. void *context, int vl, int mode,
  2624. u64 data)
  2625. {
  2626. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2627. return dd->send_pio_err_status_cnt[6];
  2628. }
  2629. static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
  2630. void *context, int vl, int mode,
  2631. u64 data)
  2632. {
  2633. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2634. return dd->send_pio_err_status_cnt[5];
  2635. }
  2636. static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
  2637. void *context, int vl, int mode,
  2638. u64 data)
  2639. {
  2640. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2641. return dd->send_pio_err_status_cnt[4];
  2642. }
  2643. static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
  2644. void *context, int vl, int mode,
  2645. u64 data)
  2646. {
  2647. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2648. return dd->send_pio_err_status_cnt[3];
  2649. }
  2650. static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
  2651. void *context, int vl, int mode,
  2652. u64 data)
  2653. {
  2654. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2655. return dd->send_pio_err_status_cnt[2];
  2656. }
  2657. static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
  2658. void *context, int vl,
  2659. int mode, u64 data)
  2660. {
  2661. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2662. return dd->send_pio_err_status_cnt[1];
  2663. }
  2664. static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
  2665. void *context, int vl, int mode,
  2666. u64 data)
  2667. {
  2668. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2669. return dd->send_pio_err_status_cnt[0];
  2670. }
  2671. /*
  2672. * Software counters corresponding to each of the
  2673. * error status bits within SendDmaErrStatus
  2674. */
  2675. static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
  2676. const struct cntr_entry *entry,
  2677. void *context, int vl, int mode, u64 data)
  2678. {
  2679. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2680. return dd->send_dma_err_status_cnt[3];
  2681. }
  2682. static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
  2683. const struct cntr_entry *entry,
  2684. void *context, int vl, int mode, u64 data)
  2685. {
  2686. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2687. return dd->send_dma_err_status_cnt[2];
  2688. }
  2689. static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
  2690. void *context, int vl, int mode,
  2691. u64 data)
  2692. {
  2693. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2694. return dd->send_dma_err_status_cnt[1];
  2695. }
  2696. static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
  2697. void *context, int vl, int mode,
  2698. u64 data)
  2699. {
  2700. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2701. return dd->send_dma_err_status_cnt[0];
  2702. }
  2703. /*
  2704. * Software counters corresponding to each of the
  2705. * error status bits within SendEgressErrStatus
  2706. */
  2707. static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
  2708. const struct cntr_entry *entry,
  2709. void *context, int vl, int mode, u64 data)
  2710. {
  2711. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2712. return dd->send_egress_err_status_cnt[63];
  2713. }
  2714. static u64 access_tx_read_sdma_memory_csr_err_cnt(
  2715. const struct cntr_entry *entry,
  2716. void *context, int vl, int mode, u64 data)
  2717. {
  2718. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2719. return dd->send_egress_err_status_cnt[62];
  2720. }
  2721. static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
  2722. void *context, int vl, int mode,
  2723. u64 data)
  2724. {
  2725. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2726. return dd->send_egress_err_status_cnt[61];
  2727. }
  2728. static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
  2729. void *context, int vl,
  2730. int mode, u64 data)
  2731. {
  2732. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2733. return dd->send_egress_err_status_cnt[60];
  2734. }
  2735. static u64 access_tx_read_sdma_memory_cor_err_cnt(
  2736. const struct cntr_entry *entry,
  2737. void *context, int vl, int mode, u64 data)
  2738. {
  2739. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2740. return dd->send_egress_err_status_cnt[59];
  2741. }
  2742. static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
  2743. void *context, int vl, int mode,
  2744. u64 data)
  2745. {
  2746. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2747. return dd->send_egress_err_status_cnt[58];
  2748. }
  2749. static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
  2750. void *context, int vl, int mode,
  2751. u64 data)
  2752. {
  2753. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2754. return dd->send_egress_err_status_cnt[57];
  2755. }
  2756. static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
  2757. void *context, int vl, int mode,
  2758. u64 data)
  2759. {
  2760. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2761. return dd->send_egress_err_status_cnt[56];
  2762. }
  2763. static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
  2764. void *context, int vl, int mode,
  2765. u64 data)
  2766. {
  2767. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2768. return dd->send_egress_err_status_cnt[55];
  2769. }
  2770. static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
  2771. void *context, int vl, int mode,
  2772. u64 data)
  2773. {
  2774. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2775. return dd->send_egress_err_status_cnt[54];
  2776. }
  2777. static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
  2778. void *context, int vl, int mode,
  2779. u64 data)
  2780. {
  2781. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2782. return dd->send_egress_err_status_cnt[53];
  2783. }
  2784. static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
  2785. void *context, int vl, int mode,
  2786. u64 data)
  2787. {
  2788. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2789. return dd->send_egress_err_status_cnt[52];
  2790. }
  2791. static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
  2792. void *context, int vl, int mode,
  2793. u64 data)
  2794. {
  2795. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2796. return dd->send_egress_err_status_cnt[51];
  2797. }
  2798. static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
  2799. void *context, int vl, int mode,
  2800. u64 data)
  2801. {
  2802. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2803. return dd->send_egress_err_status_cnt[50];
  2804. }
  2805. static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
  2806. void *context, int vl, int mode,
  2807. u64 data)
  2808. {
  2809. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2810. return dd->send_egress_err_status_cnt[49];
  2811. }
  2812. static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
  2813. void *context, int vl, int mode,
  2814. u64 data)
  2815. {
  2816. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2817. return dd->send_egress_err_status_cnt[48];
  2818. }
  2819. static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
  2820. void *context, int vl, int mode,
  2821. u64 data)
  2822. {
  2823. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2824. return dd->send_egress_err_status_cnt[47];
  2825. }
  2826. static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
  2827. void *context, int vl, int mode,
  2828. u64 data)
  2829. {
  2830. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2831. return dd->send_egress_err_status_cnt[46];
  2832. }
  2833. static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
  2834. void *context, int vl, int mode,
  2835. u64 data)
  2836. {
  2837. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2838. return dd->send_egress_err_status_cnt[45];
  2839. }
  2840. static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
  2841. void *context, int vl,
  2842. int mode, u64 data)
  2843. {
  2844. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2845. return dd->send_egress_err_status_cnt[44];
  2846. }
  2847. static u64 access_tx_read_sdma_memory_unc_err_cnt(
  2848. const struct cntr_entry *entry,
  2849. void *context, int vl, int mode, u64 data)
  2850. {
  2851. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2852. return dd->send_egress_err_status_cnt[43];
  2853. }
  2854. static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
  2855. void *context, int vl, int mode,
  2856. u64 data)
  2857. {
  2858. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2859. return dd->send_egress_err_status_cnt[42];
  2860. }
  2861. static u64 access_tx_credit_return_partiy_err_cnt(
  2862. const struct cntr_entry *entry,
  2863. void *context, int vl, int mode, u64 data)
  2864. {
  2865. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2866. return dd->send_egress_err_status_cnt[41];
  2867. }
  2868. static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
  2869. const struct cntr_entry *entry,
  2870. void *context, int vl, int mode, u64 data)
  2871. {
  2872. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2873. return dd->send_egress_err_status_cnt[40];
  2874. }
  2875. static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
  2876. const struct cntr_entry *entry,
  2877. void *context, int vl, int mode, u64 data)
  2878. {
  2879. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2880. return dd->send_egress_err_status_cnt[39];
  2881. }
  2882. static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
  2883. const struct cntr_entry *entry,
  2884. void *context, int vl, int mode, u64 data)
  2885. {
  2886. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2887. return dd->send_egress_err_status_cnt[38];
  2888. }
  2889. static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
  2890. const struct cntr_entry *entry,
  2891. void *context, int vl, int mode, u64 data)
  2892. {
  2893. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2894. return dd->send_egress_err_status_cnt[37];
  2895. }
  2896. static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
  2897. const struct cntr_entry *entry,
  2898. void *context, int vl, int mode, u64 data)
  2899. {
  2900. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2901. return dd->send_egress_err_status_cnt[36];
  2902. }
  2903. static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
  2904. const struct cntr_entry *entry,
  2905. void *context, int vl, int mode, u64 data)
  2906. {
  2907. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2908. return dd->send_egress_err_status_cnt[35];
  2909. }
  2910. static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
  2911. const struct cntr_entry *entry,
  2912. void *context, int vl, int mode, u64 data)
  2913. {
  2914. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2915. return dd->send_egress_err_status_cnt[34];
  2916. }
  2917. static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
  2918. const struct cntr_entry *entry,
  2919. void *context, int vl, int mode, u64 data)
  2920. {
  2921. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2922. return dd->send_egress_err_status_cnt[33];
  2923. }
  2924. static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
  2925. const struct cntr_entry *entry,
  2926. void *context, int vl, int mode, u64 data)
  2927. {
  2928. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2929. return dd->send_egress_err_status_cnt[32];
  2930. }
  2931. static u64 access_tx_sdma15_disallowed_packet_err_cnt(
  2932. const struct cntr_entry *entry,
  2933. void *context, int vl, int mode, u64 data)
  2934. {
  2935. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2936. return dd->send_egress_err_status_cnt[31];
  2937. }
  2938. static u64 access_tx_sdma14_disallowed_packet_err_cnt(
  2939. const struct cntr_entry *entry,
  2940. void *context, int vl, int mode, u64 data)
  2941. {
  2942. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2943. return dd->send_egress_err_status_cnt[30];
  2944. }
  2945. static u64 access_tx_sdma13_disallowed_packet_err_cnt(
  2946. const struct cntr_entry *entry,
  2947. void *context, int vl, int mode, u64 data)
  2948. {
  2949. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2950. return dd->send_egress_err_status_cnt[29];
  2951. }
  2952. static u64 access_tx_sdma12_disallowed_packet_err_cnt(
  2953. const struct cntr_entry *entry,
  2954. void *context, int vl, int mode, u64 data)
  2955. {
  2956. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2957. return dd->send_egress_err_status_cnt[28];
  2958. }
  2959. static u64 access_tx_sdma11_disallowed_packet_err_cnt(
  2960. const struct cntr_entry *entry,
  2961. void *context, int vl, int mode, u64 data)
  2962. {
  2963. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2964. return dd->send_egress_err_status_cnt[27];
  2965. }
  2966. static u64 access_tx_sdma10_disallowed_packet_err_cnt(
  2967. const struct cntr_entry *entry,
  2968. void *context, int vl, int mode, u64 data)
  2969. {
  2970. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2971. return dd->send_egress_err_status_cnt[26];
  2972. }
  2973. static u64 access_tx_sdma9_disallowed_packet_err_cnt(
  2974. const struct cntr_entry *entry,
  2975. void *context, int vl, int mode, u64 data)
  2976. {
  2977. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2978. return dd->send_egress_err_status_cnt[25];
  2979. }
  2980. static u64 access_tx_sdma8_disallowed_packet_err_cnt(
  2981. const struct cntr_entry *entry,
  2982. void *context, int vl, int mode, u64 data)
  2983. {
  2984. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2985. return dd->send_egress_err_status_cnt[24];
  2986. }
  2987. static u64 access_tx_sdma7_disallowed_packet_err_cnt(
  2988. const struct cntr_entry *entry,
  2989. void *context, int vl, int mode, u64 data)
  2990. {
  2991. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2992. return dd->send_egress_err_status_cnt[23];
  2993. }
  2994. static u64 access_tx_sdma6_disallowed_packet_err_cnt(
  2995. const struct cntr_entry *entry,
  2996. void *context, int vl, int mode, u64 data)
  2997. {
  2998. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  2999. return dd->send_egress_err_status_cnt[22];
  3000. }
  3001. static u64 access_tx_sdma5_disallowed_packet_err_cnt(
  3002. const struct cntr_entry *entry,
  3003. void *context, int vl, int mode, u64 data)
  3004. {
  3005. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3006. return dd->send_egress_err_status_cnt[21];
  3007. }
  3008. static u64 access_tx_sdma4_disallowed_packet_err_cnt(
  3009. const struct cntr_entry *entry,
  3010. void *context, int vl, int mode, u64 data)
  3011. {
  3012. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3013. return dd->send_egress_err_status_cnt[20];
  3014. }
  3015. static u64 access_tx_sdma3_disallowed_packet_err_cnt(
  3016. const struct cntr_entry *entry,
  3017. void *context, int vl, int mode, u64 data)
  3018. {
  3019. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3020. return dd->send_egress_err_status_cnt[19];
  3021. }
  3022. static u64 access_tx_sdma2_disallowed_packet_err_cnt(
  3023. const struct cntr_entry *entry,
  3024. void *context, int vl, int mode, u64 data)
  3025. {
  3026. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3027. return dd->send_egress_err_status_cnt[18];
  3028. }
  3029. static u64 access_tx_sdma1_disallowed_packet_err_cnt(
  3030. const struct cntr_entry *entry,
  3031. void *context, int vl, int mode, u64 data)
  3032. {
  3033. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3034. return dd->send_egress_err_status_cnt[17];
  3035. }
  3036. static u64 access_tx_sdma0_disallowed_packet_err_cnt(
  3037. const struct cntr_entry *entry,
  3038. void *context, int vl, int mode, u64 data)
  3039. {
  3040. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3041. return dd->send_egress_err_status_cnt[16];
  3042. }
  3043. static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
  3044. void *context, int vl, int mode,
  3045. u64 data)
  3046. {
  3047. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3048. return dd->send_egress_err_status_cnt[15];
  3049. }
  3050. static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
  3051. void *context, int vl,
  3052. int mode, u64 data)
  3053. {
  3054. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3055. return dd->send_egress_err_status_cnt[14];
  3056. }
  3057. static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
  3058. void *context, int vl, int mode,
  3059. u64 data)
  3060. {
  3061. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3062. return dd->send_egress_err_status_cnt[13];
  3063. }
  3064. static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
  3065. void *context, int vl, int mode,
  3066. u64 data)
  3067. {
  3068. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3069. return dd->send_egress_err_status_cnt[12];
  3070. }
  3071. static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
  3072. const struct cntr_entry *entry,
  3073. void *context, int vl, int mode, u64 data)
  3074. {
  3075. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3076. return dd->send_egress_err_status_cnt[11];
  3077. }
  3078. static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
  3079. void *context, int vl, int mode,
  3080. u64 data)
  3081. {
  3082. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3083. return dd->send_egress_err_status_cnt[10];
  3084. }
  3085. static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
  3086. void *context, int vl, int mode,
  3087. u64 data)
  3088. {
  3089. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3090. return dd->send_egress_err_status_cnt[9];
  3091. }
  3092. static u64 access_tx_sdma_launch_intf_parity_err_cnt(
  3093. const struct cntr_entry *entry,
  3094. void *context, int vl, int mode, u64 data)
  3095. {
  3096. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3097. return dd->send_egress_err_status_cnt[8];
  3098. }
  3099. static u64 access_tx_pio_launch_intf_parity_err_cnt(
  3100. const struct cntr_entry *entry,
  3101. void *context, int vl, int mode, u64 data)
  3102. {
  3103. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3104. return dd->send_egress_err_status_cnt[7];
  3105. }
  3106. static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
  3107. void *context, int vl, int mode,
  3108. u64 data)
  3109. {
  3110. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3111. return dd->send_egress_err_status_cnt[6];
  3112. }
  3113. static u64 access_tx_incorrect_link_state_err_cnt(
  3114. const struct cntr_entry *entry,
  3115. void *context, int vl, int mode, u64 data)
  3116. {
  3117. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3118. return dd->send_egress_err_status_cnt[5];
  3119. }
  3120. static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
  3121. void *context, int vl, int mode,
  3122. u64 data)
  3123. {
  3124. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3125. return dd->send_egress_err_status_cnt[4];
  3126. }
  3127. static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
  3128. const struct cntr_entry *entry,
  3129. void *context, int vl, int mode, u64 data)
  3130. {
  3131. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3132. return dd->send_egress_err_status_cnt[3];
  3133. }
  3134. static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
  3135. void *context, int vl, int mode,
  3136. u64 data)
  3137. {
  3138. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3139. return dd->send_egress_err_status_cnt[2];
  3140. }
  3141. static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
  3142. const struct cntr_entry *entry,
  3143. void *context, int vl, int mode, u64 data)
  3144. {
  3145. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3146. return dd->send_egress_err_status_cnt[1];
  3147. }
  3148. static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
  3149. const struct cntr_entry *entry,
  3150. void *context, int vl, int mode, u64 data)
  3151. {
  3152. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3153. return dd->send_egress_err_status_cnt[0];
  3154. }
  3155. /*
  3156. * Software counters corresponding to each of the
  3157. * error status bits within SendErrStatus
  3158. */
  3159. static u64 access_send_csr_write_bad_addr_err_cnt(
  3160. const struct cntr_entry *entry,
  3161. void *context, int vl, int mode, u64 data)
  3162. {
  3163. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3164. return dd->send_err_status_cnt[2];
  3165. }
  3166. static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
  3167. void *context, int vl,
  3168. int mode, u64 data)
  3169. {
  3170. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3171. return dd->send_err_status_cnt[1];
  3172. }
  3173. static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
  3174. void *context, int vl, int mode,
  3175. u64 data)
  3176. {
  3177. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3178. return dd->send_err_status_cnt[0];
  3179. }
  3180. /*
  3181. * Software counters corresponding to each of the
  3182. * error status bits within SendCtxtErrStatus
  3183. */
  3184. static u64 access_pio_write_out_of_bounds_err_cnt(
  3185. const struct cntr_entry *entry,
  3186. void *context, int vl, int mode, u64 data)
  3187. {
  3188. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3189. return dd->sw_ctxt_err_status_cnt[4];
  3190. }
  3191. static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
  3192. void *context, int vl, int mode,
  3193. u64 data)
  3194. {
  3195. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3196. return dd->sw_ctxt_err_status_cnt[3];
  3197. }
  3198. static u64 access_pio_write_crosses_boundary_err_cnt(
  3199. const struct cntr_entry *entry,
  3200. void *context, int vl, int mode, u64 data)
  3201. {
  3202. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3203. return dd->sw_ctxt_err_status_cnt[2];
  3204. }
  3205. static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
  3206. void *context, int vl,
  3207. int mode, u64 data)
  3208. {
  3209. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3210. return dd->sw_ctxt_err_status_cnt[1];
  3211. }
  3212. static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
  3213. void *context, int vl, int mode,
  3214. u64 data)
  3215. {
  3216. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3217. return dd->sw_ctxt_err_status_cnt[0];
  3218. }
  3219. /*
  3220. * Software counters corresponding to each of the
  3221. * error status bits within SendDmaEngErrStatus
  3222. */
  3223. static u64 access_sdma_header_request_fifo_cor_err_cnt(
  3224. const struct cntr_entry *entry,
  3225. void *context, int vl, int mode, u64 data)
  3226. {
  3227. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3228. return dd->sw_send_dma_eng_err_status_cnt[23];
  3229. }
  3230. static u64 access_sdma_header_storage_cor_err_cnt(
  3231. const struct cntr_entry *entry,
  3232. void *context, int vl, int mode, u64 data)
  3233. {
  3234. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3235. return dd->sw_send_dma_eng_err_status_cnt[22];
  3236. }
  3237. static u64 access_sdma_packet_tracking_cor_err_cnt(
  3238. const struct cntr_entry *entry,
  3239. void *context, int vl, int mode, u64 data)
  3240. {
  3241. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3242. return dd->sw_send_dma_eng_err_status_cnt[21];
  3243. }
  3244. static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
  3245. void *context, int vl, int mode,
  3246. u64 data)
  3247. {
  3248. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3249. return dd->sw_send_dma_eng_err_status_cnt[20];
  3250. }
  3251. static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
  3252. void *context, int vl, int mode,
  3253. u64 data)
  3254. {
  3255. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3256. return dd->sw_send_dma_eng_err_status_cnt[19];
  3257. }
  3258. static u64 access_sdma_header_request_fifo_unc_err_cnt(
  3259. const struct cntr_entry *entry,
  3260. void *context, int vl, int mode, u64 data)
  3261. {
  3262. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3263. return dd->sw_send_dma_eng_err_status_cnt[18];
  3264. }
  3265. static u64 access_sdma_header_storage_unc_err_cnt(
  3266. const struct cntr_entry *entry,
  3267. void *context, int vl, int mode, u64 data)
  3268. {
  3269. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3270. return dd->sw_send_dma_eng_err_status_cnt[17];
  3271. }
  3272. static u64 access_sdma_packet_tracking_unc_err_cnt(
  3273. const struct cntr_entry *entry,
  3274. void *context, int vl, int mode, u64 data)
  3275. {
  3276. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3277. return dd->sw_send_dma_eng_err_status_cnt[16];
  3278. }
  3279. static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
  3280. void *context, int vl, int mode,
  3281. u64 data)
  3282. {
  3283. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3284. return dd->sw_send_dma_eng_err_status_cnt[15];
  3285. }
  3286. static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
  3287. void *context, int vl, int mode,
  3288. u64 data)
  3289. {
  3290. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3291. return dd->sw_send_dma_eng_err_status_cnt[14];
  3292. }
  3293. static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
  3294. void *context, int vl, int mode,
  3295. u64 data)
  3296. {
  3297. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3298. return dd->sw_send_dma_eng_err_status_cnt[13];
  3299. }
  3300. static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
  3301. void *context, int vl, int mode,
  3302. u64 data)
  3303. {
  3304. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3305. return dd->sw_send_dma_eng_err_status_cnt[12];
  3306. }
  3307. static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
  3308. void *context, int vl, int mode,
  3309. u64 data)
  3310. {
  3311. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3312. return dd->sw_send_dma_eng_err_status_cnt[11];
  3313. }
  3314. static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
  3315. void *context, int vl, int mode,
  3316. u64 data)
  3317. {
  3318. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3319. return dd->sw_send_dma_eng_err_status_cnt[10];
  3320. }
  3321. static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
  3322. void *context, int vl, int mode,
  3323. u64 data)
  3324. {
  3325. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3326. return dd->sw_send_dma_eng_err_status_cnt[9];
  3327. }
  3328. static u64 access_sdma_packet_desc_overflow_err_cnt(
  3329. const struct cntr_entry *entry,
  3330. void *context, int vl, int mode, u64 data)
  3331. {
  3332. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3333. return dd->sw_send_dma_eng_err_status_cnt[8];
  3334. }
  3335. static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
  3336. void *context, int vl,
  3337. int mode, u64 data)
  3338. {
  3339. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3340. return dd->sw_send_dma_eng_err_status_cnt[7];
  3341. }
  3342. static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
  3343. void *context, int vl, int mode, u64 data)
  3344. {
  3345. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3346. return dd->sw_send_dma_eng_err_status_cnt[6];
  3347. }
  3348. static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
  3349. void *context, int vl, int mode,
  3350. u64 data)
  3351. {
  3352. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3353. return dd->sw_send_dma_eng_err_status_cnt[5];
  3354. }
  3355. static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
  3356. void *context, int vl, int mode,
  3357. u64 data)
  3358. {
  3359. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3360. return dd->sw_send_dma_eng_err_status_cnt[4];
  3361. }
  3362. static u64 access_sdma_tail_out_of_bounds_err_cnt(
  3363. const struct cntr_entry *entry,
  3364. void *context, int vl, int mode, u64 data)
  3365. {
  3366. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3367. return dd->sw_send_dma_eng_err_status_cnt[3];
  3368. }
  3369. static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
  3370. void *context, int vl, int mode,
  3371. u64 data)
  3372. {
  3373. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3374. return dd->sw_send_dma_eng_err_status_cnt[2];
  3375. }
  3376. static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
  3377. void *context, int vl, int mode,
  3378. u64 data)
  3379. {
  3380. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3381. return dd->sw_send_dma_eng_err_status_cnt[1];
  3382. }
  3383. static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
  3384. void *context, int vl, int mode,
  3385. u64 data)
  3386. {
  3387. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3388. return dd->sw_send_dma_eng_err_status_cnt[0];
  3389. }
  3390. static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
  3391. void *context, int vl, int mode,
  3392. u64 data)
  3393. {
  3394. struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
  3395. u64 val = 0;
  3396. u64 csr = entry->csr;
  3397. val = read_write_csr(dd, csr, mode, data);
  3398. if (mode == CNTR_MODE_R) {
  3399. val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
  3400. CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
  3401. } else if (mode == CNTR_MODE_W) {
  3402. dd->sw_rcv_bypass_packet_errors = 0;
  3403. } else {
  3404. dd_dev_err(dd, "Invalid cntr register access mode");
  3405. return 0;
  3406. }
  3407. return val;
  3408. }
  3409. #define def_access_sw_cpu(cntr) \
  3410. static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
  3411. void *context, int vl, int mode, u64 data) \
  3412. { \
  3413. struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
  3414. return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
  3415. ppd->ibport_data.rvp.cntr, vl, \
  3416. mode, data); \
  3417. }
  3418. def_access_sw_cpu(rc_acks);
  3419. def_access_sw_cpu(rc_qacks);
  3420. def_access_sw_cpu(rc_delayed_comp);
  3421. #define def_access_ibp_counter(cntr) \
  3422. static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
  3423. void *context, int vl, int mode, u64 data) \
  3424. { \
  3425. struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
  3426. \
  3427. if (vl != CNTR_INVALID_VL) \
  3428. return 0; \
  3429. \
  3430. return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
  3431. mode, data); \
  3432. }
  3433. def_access_ibp_counter(loop_pkts);
  3434. def_access_ibp_counter(rc_resends);
  3435. def_access_ibp_counter(rnr_naks);
  3436. def_access_ibp_counter(other_naks);
  3437. def_access_ibp_counter(rc_timeouts);
  3438. def_access_ibp_counter(pkt_drops);
  3439. def_access_ibp_counter(dmawait);
  3440. def_access_ibp_counter(rc_seqnak);
  3441. def_access_ibp_counter(rc_dupreq);
  3442. def_access_ibp_counter(rdma_seq);
  3443. def_access_ibp_counter(unaligned);
  3444. def_access_ibp_counter(seq_naks);
  3445. static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
  3446. [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
  3447. [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
  3448. CNTR_NORMAL),
  3449. [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
  3450. CNTR_NORMAL),
  3451. [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
  3452. RCV_TID_FLOW_GEN_MISMATCH_CNT,
  3453. CNTR_NORMAL),
  3454. [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
  3455. CNTR_NORMAL),
  3456. [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
  3457. RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
  3458. [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
  3459. CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
  3460. [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
  3461. CNTR_NORMAL),
  3462. [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
  3463. CNTR_NORMAL),
  3464. [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
  3465. CNTR_NORMAL),
  3466. [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
  3467. CNTR_NORMAL),
  3468. [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
  3469. CNTR_NORMAL),
  3470. [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
  3471. CNTR_NORMAL),
  3472. [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
  3473. CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
  3474. [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
  3475. CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
  3476. [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
  3477. CNTR_SYNTH),
  3478. [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
  3479. access_dc_rcv_err_cnt),
  3480. [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
  3481. CNTR_SYNTH),
  3482. [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
  3483. CNTR_SYNTH),
  3484. [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
  3485. CNTR_SYNTH),
  3486. [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
  3487. DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
  3488. [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
  3489. DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
  3490. CNTR_SYNTH),
  3491. [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
  3492. DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
  3493. [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
  3494. CNTR_SYNTH),
  3495. [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
  3496. CNTR_SYNTH),
  3497. [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
  3498. CNTR_SYNTH),
  3499. [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
  3500. CNTR_SYNTH),
  3501. [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
  3502. CNTR_SYNTH),
  3503. [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
  3504. CNTR_SYNTH),
  3505. [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
  3506. CNTR_SYNTH),
  3507. [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
  3508. CNTR_SYNTH | CNTR_VL),
  3509. [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
  3510. CNTR_SYNTH | CNTR_VL),
  3511. [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
  3512. [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
  3513. CNTR_SYNTH | CNTR_VL),
  3514. [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
  3515. [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
  3516. CNTR_SYNTH | CNTR_VL),
  3517. [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
  3518. CNTR_SYNTH),
  3519. [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
  3520. CNTR_SYNTH | CNTR_VL),
  3521. [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
  3522. CNTR_SYNTH),
  3523. [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
  3524. CNTR_SYNTH | CNTR_VL),
  3525. [C_DC_TOTAL_CRC] =
  3526. DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
  3527. CNTR_SYNTH),
  3528. [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
  3529. CNTR_SYNTH),
  3530. [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
  3531. CNTR_SYNTH),
  3532. [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
  3533. CNTR_SYNTH),
  3534. [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
  3535. CNTR_SYNTH),
  3536. [C_DC_CRC_MULT_LN] =
  3537. DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
  3538. CNTR_SYNTH),
  3539. [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
  3540. CNTR_SYNTH),
  3541. [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
  3542. CNTR_SYNTH),
  3543. [C_DC_SEQ_CRC_CNT] =
  3544. DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
  3545. CNTR_SYNTH),
  3546. [C_DC_ESC0_ONLY_CNT] =
  3547. DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
  3548. CNTR_SYNTH),
  3549. [C_DC_ESC0_PLUS1_CNT] =
  3550. DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
  3551. CNTR_SYNTH),
  3552. [C_DC_ESC0_PLUS2_CNT] =
  3553. DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
  3554. CNTR_SYNTH),
  3555. [C_DC_REINIT_FROM_PEER_CNT] =
  3556. DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
  3557. CNTR_SYNTH),
  3558. [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
  3559. CNTR_SYNTH),
  3560. [C_DC_MISC_FLG_CNT] =
  3561. DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
  3562. CNTR_SYNTH),
  3563. [C_DC_PRF_GOOD_LTP_CNT] =
  3564. DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
  3565. [C_DC_PRF_ACCEPTED_LTP_CNT] =
  3566. DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
  3567. CNTR_SYNTH),
  3568. [C_DC_PRF_RX_FLIT_CNT] =
  3569. DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
  3570. [C_DC_PRF_TX_FLIT_CNT] =
  3571. DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
  3572. [C_DC_PRF_CLK_CNTR] =
  3573. DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
  3574. [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
  3575. DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
  3576. [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
  3577. DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
  3578. CNTR_SYNTH),
  3579. [C_DC_PG_STS_TX_SBE_CNT] =
  3580. DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
  3581. [C_DC_PG_STS_TX_MBE_CNT] =
  3582. DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
  3583. CNTR_SYNTH),
  3584. [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
  3585. access_sw_cpu_intr),
  3586. [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
  3587. access_sw_cpu_rcv_limit),
  3588. [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
  3589. access_sw_ctx0_seq_drop),
  3590. [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
  3591. access_sw_vtx_wait),
  3592. [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
  3593. access_sw_pio_wait),
  3594. [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
  3595. access_sw_pio_drain),
  3596. [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
  3597. access_sw_kmem_wait),
  3598. [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
  3599. access_sw_send_schedule),
  3600. [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
  3601. SEND_DMA_DESC_FETCHED_CNT, 0,
  3602. CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
  3603. dev_access_u32_csr),
  3604. [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
  3605. CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
  3606. access_sde_int_cnt),
  3607. [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
  3608. CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
  3609. access_sde_err_cnt),
  3610. [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
  3611. CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
  3612. access_sde_idle_int_cnt),
  3613. [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
  3614. CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
  3615. access_sde_progress_int_cnt),
  3616. /* MISC_ERR_STATUS */
  3617. [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
  3618. CNTR_NORMAL,
  3619. access_misc_pll_lock_fail_err_cnt),
  3620. [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
  3621. CNTR_NORMAL,
  3622. access_misc_mbist_fail_err_cnt),
  3623. [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
  3624. CNTR_NORMAL,
  3625. access_misc_invalid_eep_cmd_err_cnt),
  3626. [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
  3627. CNTR_NORMAL,
  3628. access_misc_efuse_done_parity_err_cnt),
  3629. [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
  3630. CNTR_NORMAL,
  3631. access_misc_efuse_write_err_cnt),
  3632. [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
  3633. 0, CNTR_NORMAL,
  3634. access_misc_efuse_read_bad_addr_err_cnt),
  3635. [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
  3636. CNTR_NORMAL,
  3637. access_misc_efuse_csr_parity_err_cnt),
  3638. [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
  3639. CNTR_NORMAL,
  3640. access_misc_fw_auth_failed_err_cnt),
  3641. [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
  3642. CNTR_NORMAL,
  3643. access_misc_key_mismatch_err_cnt),
  3644. [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
  3645. CNTR_NORMAL,
  3646. access_misc_sbus_write_failed_err_cnt),
  3647. [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
  3648. CNTR_NORMAL,
  3649. access_misc_csr_write_bad_addr_err_cnt),
  3650. [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
  3651. CNTR_NORMAL,
  3652. access_misc_csr_read_bad_addr_err_cnt),
  3653. [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
  3654. CNTR_NORMAL,
  3655. access_misc_csr_parity_err_cnt),
  3656. /* CceErrStatus */
  3657. [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
  3658. CNTR_NORMAL,
  3659. access_sw_cce_err_status_aggregated_cnt),
  3660. [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
  3661. CNTR_NORMAL,
  3662. access_cce_msix_csr_parity_err_cnt),
  3663. [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
  3664. CNTR_NORMAL,
  3665. access_cce_int_map_unc_err_cnt),
  3666. [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
  3667. CNTR_NORMAL,
  3668. access_cce_int_map_cor_err_cnt),
  3669. [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
  3670. CNTR_NORMAL,
  3671. access_cce_msix_table_unc_err_cnt),
  3672. [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
  3673. CNTR_NORMAL,
  3674. access_cce_msix_table_cor_err_cnt),
  3675. [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
  3676. 0, CNTR_NORMAL,
  3677. access_cce_rxdma_conv_fifo_parity_err_cnt),
  3678. [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
  3679. 0, CNTR_NORMAL,
  3680. access_cce_rcpl_async_fifo_parity_err_cnt),
  3681. [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
  3682. CNTR_NORMAL,
  3683. access_cce_seg_write_bad_addr_err_cnt),
  3684. [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
  3685. CNTR_NORMAL,
  3686. access_cce_seg_read_bad_addr_err_cnt),
  3687. [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
  3688. CNTR_NORMAL,
  3689. access_la_triggered_cnt),
  3690. [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
  3691. CNTR_NORMAL,
  3692. access_cce_trgt_cpl_timeout_err_cnt),
  3693. [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
  3694. CNTR_NORMAL,
  3695. access_pcic_receive_parity_err_cnt),
  3696. [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
  3697. CNTR_NORMAL,
  3698. access_pcic_transmit_back_parity_err_cnt),
  3699. [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
  3700. 0, CNTR_NORMAL,
  3701. access_pcic_transmit_front_parity_err_cnt),
  3702. [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
  3703. CNTR_NORMAL,
  3704. access_pcic_cpl_dat_q_unc_err_cnt),
  3705. [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
  3706. CNTR_NORMAL,
  3707. access_pcic_cpl_hd_q_unc_err_cnt),
  3708. [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
  3709. CNTR_NORMAL,
  3710. access_pcic_post_dat_q_unc_err_cnt),
  3711. [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
  3712. CNTR_NORMAL,
  3713. access_pcic_post_hd_q_unc_err_cnt),
  3714. [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
  3715. CNTR_NORMAL,
  3716. access_pcic_retry_sot_mem_unc_err_cnt),
  3717. [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
  3718. CNTR_NORMAL,
  3719. access_pcic_retry_mem_unc_err),
  3720. [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
  3721. CNTR_NORMAL,
  3722. access_pcic_n_post_dat_q_parity_err_cnt),
  3723. [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
  3724. CNTR_NORMAL,
  3725. access_pcic_n_post_h_q_parity_err_cnt),
  3726. [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
  3727. CNTR_NORMAL,
  3728. access_pcic_cpl_dat_q_cor_err_cnt),
  3729. [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
  3730. CNTR_NORMAL,
  3731. access_pcic_cpl_hd_q_cor_err_cnt),
  3732. [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
  3733. CNTR_NORMAL,
  3734. access_pcic_post_dat_q_cor_err_cnt),
  3735. [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
  3736. CNTR_NORMAL,
  3737. access_pcic_post_hd_q_cor_err_cnt),
  3738. [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
  3739. CNTR_NORMAL,
  3740. access_pcic_retry_sot_mem_cor_err_cnt),
  3741. [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
  3742. CNTR_NORMAL,
  3743. access_pcic_retry_mem_cor_err_cnt),
  3744. [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
  3745. "CceCli1AsyncFifoDbgParityError", 0, 0,
  3746. CNTR_NORMAL,
  3747. access_cce_cli1_async_fifo_dbg_parity_err_cnt),
  3748. [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
  3749. "CceCli1AsyncFifoRxdmaParityError", 0, 0,
  3750. CNTR_NORMAL,
  3751. access_cce_cli1_async_fifo_rxdma_parity_err_cnt
  3752. ),
  3753. [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
  3754. "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
  3755. CNTR_NORMAL,
  3756. access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
  3757. [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
  3758. "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
  3759. CNTR_NORMAL,
  3760. access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
  3761. [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
  3762. 0, CNTR_NORMAL,
  3763. access_cce_cli2_async_fifo_parity_err_cnt),
  3764. [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
  3765. CNTR_NORMAL,
  3766. access_cce_csr_cfg_bus_parity_err_cnt),
  3767. [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
  3768. 0, CNTR_NORMAL,
  3769. access_cce_cli0_async_fifo_parity_err_cnt),
  3770. [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
  3771. CNTR_NORMAL,
  3772. access_cce_rspd_data_parity_err_cnt),
  3773. [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
  3774. CNTR_NORMAL,
  3775. access_cce_trgt_access_err_cnt),
  3776. [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
  3777. 0, CNTR_NORMAL,
  3778. access_cce_trgt_async_fifo_parity_err_cnt),
  3779. [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
  3780. CNTR_NORMAL,
  3781. access_cce_csr_write_bad_addr_err_cnt),
  3782. [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
  3783. CNTR_NORMAL,
  3784. access_cce_csr_read_bad_addr_err_cnt),
  3785. [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
  3786. CNTR_NORMAL,
  3787. access_ccs_csr_parity_err_cnt),
  3788. /* RcvErrStatus */
  3789. [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
  3790. CNTR_NORMAL,
  3791. access_rx_csr_parity_err_cnt),
  3792. [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
  3793. CNTR_NORMAL,
  3794. access_rx_csr_write_bad_addr_err_cnt),
  3795. [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
  3796. CNTR_NORMAL,
  3797. access_rx_csr_read_bad_addr_err_cnt),
  3798. [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
  3799. CNTR_NORMAL,
  3800. access_rx_dma_csr_unc_err_cnt),
  3801. [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
  3802. CNTR_NORMAL,
  3803. access_rx_dma_dq_fsm_encoding_err_cnt),
  3804. [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
  3805. CNTR_NORMAL,
  3806. access_rx_dma_eq_fsm_encoding_err_cnt),
  3807. [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
  3808. CNTR_NORMAL,
  3809. access_rx_dma_csr_parity_err_cnt),
  3810. [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
  3811. CNTR_NORMAL,
  3812. access_rx_rbuf_data_cor_err_cnt),
  3813. [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
  3814. CNTR_NORMAL,
  3815. access_rx_rbuf_data_unc_err_cnt),
  3816. [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
  3817. CNTR_NORMAL,
  3818. access_rx_dma_data_fifo_rd_cor_err_cnt),
  3819. [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
  3820. CNTR_NORMAL,
  3821. access_rx_dma_data_fifo_rd_unc_err_cnt),
  3822. [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
  3823. CNTR_NORMAL,
  3824. access_rx_dma_hdr_fifo_rd_cor_err_cnt),
  3825. [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
  3826. CNTR_NORMAL,
  3827. access_rx_dma_hdr_fifo_rd_unc_err_cnt),
  3828. [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
  3829. CNTR_NORMAL,
  3830. access_rx_rbuf_desc_part2_cor_err_cnt),
  3831. [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
  3832. CNTR_NORMAL,
  3833. access_rx_rbuf_desc_part2_unc_err_cnt),
  3834. [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
  3835. CNTR_NORMAL,
  3836. access_rx_rbuf_desc_part1_cor_err_cnt),
  3837. [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
  3838. CNTR_NORMAL,
  3839. access_rx_rbuf_desc_part1_unc_err_cnt),
  3840. [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
  3841. CNTR_NORMAL,
  3842. access_rx_hq_intr_fsm_err_cnt),
  3843. [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
  3844. CNTR_NORMAL,
  3845. access_rx_hq_intr_csr_parity_err_cnt),
  3846. [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
  3847. CNTR_NORMAL,
  3848. access_rx_lookup_csr_parity_err_cnt),
  3849. [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
  3850. CNTR_NORMAL,
  3851. access_rx_lookup_rcv_array_cor_err_cnt),
  3852. [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
  3853. CNTR_NORMAL,
  3854. access_rx_lookup_rcv_array_unc_err_cnt),
  3855. [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
  3856. 0, CNTR_NORMAL,
  3857. access_rx_lookup_des_part2_parity_err_cnt),
  3858. [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
  3859. 0, CNTR_NORMAL,
  3860. access_rx_lookup_des_part1_unc_cor_err_cnt),
  3861. [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
  3862. CNTR_NORMAL,
  3863. access_rx_lookup_des_part1_unc_err_cnt),
  3864. [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
  3865. CNTR_NORMAL,
  3866. access_rx_rbuf_next_free_buf_cor_err_cnt),
  3867. [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
  3868. CNTR_NORMAL,
  3869. access_rx_rbuf_next_free_buf_unc_err_cnt),
  3870. [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
  3871. "RxRbufFlInitWrAddrParityErr", 0, 0,
  3872. CNTR_NORMAL,
  3873. access_rbuf_fl_init_wr_addr_parity_err_cnt),
  3874. [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
  3875. 0, CNTR_NORMAL,
  3876. access_rx_rbuf_fl_initdone_parity_err_cnt),
  3877. [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
  3878. 0, CNTR_NORMAL,
  3879. access_rx_rbuf_fl_write_addr_parity_err_cnt),
  3880. [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
  3881. CNTR_NORMAL,
  3882. access_rx_rbuf_fl_rd_addr_parity_err_cnt),
  3883. [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
  3884. CNTR_NORMAL,
  3885. access_rx_rbuf_empty_err_cnt),
  3886. [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
  3887. CNTR_NORMAL,
  3888. access_rx_rbuf_full_err_cnt),
  3889. [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
  3890. CNTR_NORMAL,
  3891. access_rbuf_bad_lookup_err_cnt),
  3892. [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
  3893. CNTR_NORMAL,
  3894. access_rbuf_ctx_id_parity_err_cnt),
  3895. [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
  3896. CNTR_NORMAL,
  3897. access_rbuf_csr_qeopdw_parity_err_cnt),
  3898. [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
  3899. "RxRbufCsrQNumOfPktParityErr", 0, 0,
  3900. CNTR_NORMAL,
  3901. access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
  3902. [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
  3903. "RxRbufCsrQTlPtrParityErr", 0, 0,
  3904. CNTR_NORMAL,
  3905. access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
  3906. [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
  3907. 0, CNTR_NORMAL,
  3908. access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
  3909. [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
  3910. 0, CNTR_NORMAL,
  3911. access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
  3912. [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
  3913. 0, 0, CNTR_NORMAL,
  3914. access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
  3915. [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
  3916. 0, CNTR_NORMAL,
  3917. access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
  3918. [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
  3919. "RxRbufCsrQHeadBufNumParityErr", 0, 0,
  3920. CNTR_NORMAL,
  3921. access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
  3922. [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
  3923. 0, CNTR_NORMAL,
  3924. access_rx_rbuf_block_list_read_cor_err_cnt),
  3925. [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
  3926. 0, CNTR_NORMAL,
  3927. access_rx_rbuf_block_list_read_unc_err_cnt),
  3928. [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
  3929. CNTR_NORMAL,
  3930. access_rx_rbuf_lookup_des_cor_err_cnt),
  3931. [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
  3932. CNTR_NORMAL,
  3933. access_rx_rbuf_lookup_des_unc_err_cnt),
  3934. [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
  3935. "RxRbufLookupDesRegUncCorErr", 0, 0,
  3936. CNTR_NORMAL,
  3937. access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
  3938. [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
  3939. CNTR_NORMAL,
  3940. access_rx_rbuf_lookup_des_reg_unc_err_cnt),
  3941. [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
  3942. CNTR_NORMAL,
  3943. access_rx_rbuf_free_list_cor_err_cnt),
  3944. [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
  3945. CNTR_NORMAL,
  3946. access_rx_rbuf_free_list_unc_err_cnt),
  3947. [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
  3948. CNTR_NORMAL,
  3949. access_rx_rcv_fsm_encoding_err_cnt),
  3950. [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
  3951. CNTR_NORMAL,
  3952. access_rx_dma_flag_cor_err_cnt),
  3953. [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
  3954. CNTR_NORMAL,
  3955. access_rx_dma_flag_unc_err_cnt),
  3956. [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
  3957. CNTR_NORMAL,
  3958. access_rx_dc_sop_eop_parity_err_cnt),
  3959. [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
  3960. CNTR_NORMAL,
  3961. access_rx_rcv_csr_parity_err_cnt),
  3962. [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
  3963. CNTR_NORMAL,
  3964. access_rx_rcv_qp_map_table_cor_err_cnt),
  3965. [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
  3966. CNTR_NORMAL,
  3967. access_rx_rcv_qp_map_table_unc_err_cnt),
  3968. [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
  3969. CNTR_NORMAL,
  3970. access_rx_rcv_data_cor_err_cnt),
  3971. [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
  3972. CNTR_NORMAL,
  3973. access_rx_rcv_data_unc_err_cnt),
  3974. [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
  3975. CNTR_NORMAL,
  3976. access_rx_rcv_hdr_cor_err_cnt),
  3977. [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
  3978. CNTR_NORMAL,
  3979. access_rx_rcv_hdr_unc_err_cnt),
  3980. [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
  3981. CNTR_NORMAL,
  3982. access_rx_dc_intf_parity_err_cnt),
  3983. [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
  3984. CNTR_NORMAL,
  3985. access_rx_dma_csr_cor_err_cnt),
  3986. /* SendPioErrStatus */
  3987. [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
  3988. CNTR_NORMAL,
  3989. access_pio_pec_sop_head_parity_err_cnt),
  3990. [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
  3991. CNTR_NORMAL,
  3992. access_pio_pcc_sop_head_parity_err_cnt),
  3993. [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
  3994. 0, 0, CNTR_NORMAL,
  3995. access_pio_last_returned_cnt_parity_err_cnt),
  3996. [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
  3997. 0, CNTR_NORMAL,
  3998. access_pio_current_free_cnt_parity_err_cnt),
  3999. [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
  4000. CNTR_NORMAL,
  4001. access_pio_reserved_31_err_cnt),
  4002. [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
  4003. CNTR_NORMAL,
  4004. access_pio_reserved_30_err_cnt),
  4005. [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
  4006. CNTR_NORMAL,
  4007. access_pio_ppmc_sop_len_err_cnt),
  4008. [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
  4009. CNTR_NORMAL,
  4010. access_pio_ppmc_bqc_mem_parity_err_cnt),
  4011. [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
  4012. CNTR_NORMAL,
  4013. access_pio_vl_fifo_parity_err_cnt),
  4014. [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
  4015. CNTR_NORMAL,
  4016. access_pio_vlf_sop_parity_err_cnt),
  4017. [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
  4018. CNTR_NORMAL,
  4019. access_pio_vlf_v1_len_parity_err_cnt),
  4020. [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
  4021. CNTR_NORMAL,
  4022. access_pio_block_qw_count_parity_err_cnt),
  4023. [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
  4024. CNTR_NORMAL,
  4025. access_pio_write_qw_valid_parity_err_cnt),
  4026. [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
  4027. CNTR_NORMAL,
  4028. access_pio_state_machine_err_cnt),
  4029. [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
  4030. CNTR_NORMAL,
  4031. access_pio_write_data_parity_err_cnt),
  4032. [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
  4033. CNTR_NORMAL,
  4034. access_pio_host_addr_mem_cor_err_cnt),
  4035. [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
  4036. CNTR_NORMAL,
  4037. access_pio_host_addr_mem_unc_err_cnt),
  4038. [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
  4039. CNTR_NORMAL,
  4040. access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
  4041. [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
  4042. CNTR_NORMAL,
  4043. access_pio_init_sm_in_err_cnt),
  4044. [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
  4045. CNTR_NORMAL,
  4046. access_pio_ppmc_pbl_fifo_err_cnt),
  4047. [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
  4048. 0, CNTR_NORMAL,
  4049. access_pio_credit_ret_fifo_parity_err_cnt),
  4050. [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
  4051. CNTR_NORMAL,
  4052. access_pio_v1_len_mem_bank1_cor_err_cnt),
  4053. [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
  4054. CNTR_NORMAL,
  4055. access_pio_v1_len_mem_bank0_cor_err_cnt),
  4056. [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
  4057. CNTR_NORMAL,
  4058. access_pio_v1_len_mem_bank1_unc_err_cnt),
  4059. [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
  4060. CNTR_NORMAL,
  4061. access_pio_v1_len_mem_bank0_unc_err_cnt),
  4062. [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
  4063. CNTR_NORMAL,
  4064. access_pio_sm_pkt_reset_parity_err_cnt),
  4065. [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
  4066. CNTR_NORMAL,
  4067. access_pio_pkt_evict_fifo_parity_err_cnt),
  4068. [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
  4069. "PioSbrdctrlCrrelFifoParityErr", 0, 0,
  4070. CNTR_NORMAL,
  4071. access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
  4072. [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
  4073. CNTR_NORMAL,
  4074. access_pio_sbrdctl_crrel_parity_err_cnt),
  4075. [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
  4076. CNTR_NORMAL,
  4077. access_pio_pec_fifo_parity_err_cnt),
  4078. [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
  4079. CNTR_NORMAL,
  4080. access_pio_pcc_fifo_parity_err_cnt),
  4081. [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
  4082. CNTR_NORMAL,
  4083. access_pio_sb_mem_fifo1_err_cnt),
  4084. [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
  4085. CNTR_NORMAL,
  4086. access_pio_sb_mem_fifo0_err_cnt),
  4087. [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
  4088. CNTR_NORMAL,
  4089. access_pio_csr_parity_err_cnt),
  4090. [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
  4091. CNTR_NORMAL,
  4092. access_pio_write_addr_parity_err_cnt),
  4093. [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
  4094. CNTR_NORMAL,
  4095. access_pio_write_bad_ctxt_err_cnt),
  4096. /* SendDmaErrStatus */
  4097. [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
  4098. 0, CNTR_NORMAL,
  4099. access_sdma_pcie_req_tracking_cor_err_cnt),
  4100. [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
  4101. 0, CNTR_NORMAL,
  4102. access_sdma_pcie_req_tracking_unc_err_cnt),
  4103. [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
  4104. CNTR_NORMAL,
  4105. access_sdma_csr_parity_err_cnt),
  4106. [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
  4107. CNTR_NORMAL,
  4108. access_sdma_rpy_tag_err_cnt),
  4109. /* SendEgressErrStatus */
  4110. [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
  4111. CNTR_NORMAL,
  4112. access_tx_read_pio_memory_csr_unc_err_cnt),
  4113. [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
  4114. 0, CNTR_NORMAL,
  4115. access_tx_read_sdma_memory_csr_err_cnt),
  4116. [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
  4117. CNTR_NORMAL,
  4118. access_tx_egress_fifo_cor_err_cnt),
  4119. [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
  4120. CNTR_NORMAL,
  4121. access_tx_read_pio_memory_cor_err_cnt),
  4122. [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
  4123. CNTR_NORMAL,
  4124. access_tx_read_sdma_memory_cor_err_cnt),
  4125. [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
  4126. CNTR_NORMAL,
  4127. access_tx_sb_hdr_cor_err_cnt),
  4128. [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
  4129. CNTR_NORMAL,
  4130. access_tx_credit_overrun_err_cnt),
  4131. [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
  4132. CNTR_NORMAL,
  4133. access_tx_launch_fifo8_cor_err_cnt),
  4134. [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
  4135. CNTR_NORMAL,
  4136. access_tx_launch_fifo7_cor_err_cnt),
  4137. [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
  4138. CNTR_NORMAL,
  4139. access_tx_launch_fifo6_cor_err_cnt),
  4140. [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
  4141. CNTR_NORMAL,
  4142. access_tx_launch_fifo5_cor_err_cnt),
  4143. [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
  4144. CNTR_NORMAL,
  4145. access_tx_launch_fifo4_cor_err_cnt),
  4146. [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
  4147. CNTR_NORMAL,
  4148. access_tx_launch_fifo3_cor_err_cnt),
  4149. [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
  4150. CNTR_NORMAL,
  4151. access_tx_launch_fifo2_cor_err_cnt),
  4152. [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
  4153. CNTR_NORMAL,
  4154. access_tx_launch_fifo1_cor_err_cnt),
  4155. [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
  4156. CNTR_NORMAL,
  4157. access_tx_launch_fifo0_cor_err_cnt),
  4158. [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
  4159. CNTR_NORMAL,
  4160. access_tx_credit_return_vl_err_cnt),
  4161. [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
  4162. CNTR_NORMAL,
  4163. access_tx_hcrc_insertion_err_cnt),
  4164. [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
  4165. CNTR_NORMAL,
  4166. access_tx_egress_fifo_unc_err_cnt),
  4167. [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
  4168. CNTR_NORMAL,
  4169. access_tx_read_pio_memory_unc_err_cnt),
  4170. [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
  4171. CNTR_NORMAL,
  4172. access_tx_read_sdma_memory_unc_err_cnt),
  4173. [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
  4174. CNTR_NORMAL,
  4175. access_tx_sb_hdr_unc_err_cnt),
  4176. [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
  4177. CNTR_NORMAL,
  4178. access_tx_credit_return_partiy_err_cnt),
  4179. [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
  4180. 0, 0, CNTR_NORMAL,
  4181. access_tx_launch_fifo8_unc_or_parity_err_cnt),
  4182. [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
  4183. 0, 0, CNTR_NORMAL,
  4184. access_tx_launch_fifo7_unc_or_parity_err_cnt),
  4185. [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
  4186. 0, 0, CNTR_NORMAL,
  4187. access_tx_launch_fifo6_unc_or_parity_err_cnt),
  4188. [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
  4189. 0, 0, CNTR_NORMAL,
  4190. access_tx_launch_fifo5_unc_or_parity_err_cnt),
  4191. [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
  4192. 0, 0, CNTR_NORMAL,
  4193. access_tx_launch_fifo4_unc_or_parity_err_cnt),
  4194. [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
  4195. 0, 0, CNTR_NORMAL,
  4196. access_tx_launch_fifo3_unc_or_parity_err_cnt),
  4197. [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
  4198. 0, 0, CNTR_NORMAL,
  4199. access_tx_launch_fifo2_unc_or_parity_err_cnt),
  4200. [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
  4201. 0, 0, CNTR_NORMAL,
  4202. access_tx_launch_fifo1_unc_or_parity_err_cnt),
  4203. [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
  4204. 0, 0, CNTR_NORMAL,
  4205. access_tx_launch_fifo0_unc_or_parity_err_cnt),
  4206. [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
  4207. 0, 0, CNTR_NORMAL,
  4208. access_tx_sdma15_disallowed_packet_err_cnt),
  4209. [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
  4210. 0, 0, CNTR_NORMAL,
  4211. access_tx_sdma14_disallowed_packet_err_cnt),
  4212. [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
  4213. 0, 0, CNTR_NORMAL,
  4214. access_tx_sdma13_disallowed_packet_err_cnt),
  4215. [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
  4216. 0, 0, CNTR_NORMAL,
  4217. access_tx_sdma12_disallowed_packet_err_cnt),
  4218. [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
  4219. 0, 0, CNTR_NORMAL,
  4220. access_tx_sdma11_disallowed_packet_err_cnt),
  4221. [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
  4222. 0, 0, CNTR_NORMAL,
  4223. access_tx_sdma10_disallowed_packet_err_cnt),
  4224. [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
  4225. 0, 0, CNTR_NORMAL,
  4226. access_tx_sdma9_disallowed_packet_err_cnt),
  4227. [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
  4228. 0, 0, CNTR_NORMAL,
  4229. access_tx_sdma8_disallowed_packet_err_cnt),
  4230. [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
  4231. 0, 0, CNTR_NORMAL,
  4232. access_tx_sdma7_disallowed_packet_err_cnt),
  4233. [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
  4234. 0, 0, CNTR_NORMAL,
  4235. access_tx_sdma6_disallowed_packet_err_cnt),
  4236. [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
  4237. 0, 0, CNTR_NORMAL,
  4238. access_tx_sdma5_disallowed_packet_err_cnt),
  4239. [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
  4240. 0, 0, CNTR_NORMAL,
  4241. access_tx_sdma4_disallowed_packet_err_cnt),
  4242. [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
  4243. 0, 0, CNTR_NORMAL,
  4244. access_tx_sdma3_disallowed_packet_err_cnt),
  4245. [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
  4246. 0, 0, CNTR_NORMAL,
  4247. access_tx_sdma2_disallowed_packet_err_cnt),
  4248. [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
  4249. 0, 0, CNTR_NORMAL,
  4250. access_tx_sdma1_disallowed_packet_err_cnt),
  4251. [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
  4252. 0, 0, CNTR_NORMAL,
  4253. access_tx_sdma0_disallowed_packet_err_cnt),
  4254. [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
  4255. CNTR_NORMAL,
  4256. access_tx_config_parity_err_cnt),
  4257. [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
  4258. CNTR_NORMAL,
  4259. access_tx_sbrd_ctl_csr_parity_err_cnt),
  4260. [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
  4261. CNTR_NORMAL,
  4262. access_tx_launch_csr_parity_err_cnt),
  4263. [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
  4264. CNTR_NORMAL,
  4265. access_tx_illegal_vl_err_cnt),
  4266. [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
  4267. "TxSbrdCtlStateMachineParityErr", 0, 0,
  4268. CNTR_NORMAL,
  4269. access_tx_sbrd_ctl_state_machine_parity_err_cnt),
  4270. [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
  4271. CNTR_NORMAL,
  4272. access_egress_reserved_10_err_cnt),
  4273. [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
  4274. CNTR_NORMAL,
  4275. access_egress_reserved_9_err_cnt),
  4276. [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
  4277. 0, 0, CNTR_NORMAL,
  4278. access_tx_sdma_launch_intf_parity_err_cnt),
  4279. [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
  4280. CNTR_NORMAL,
  4281. access_tx_pio_launch_intf_parity_err_cnt),
  4282. [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
  4283. CNTR_NORMAL,
  4284. access_egress_reserved_6_err_cnt),
  4285. [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
  4286. CNTR_NORMAL,
  4287. access_tx_incorrect_link_state_err_cnt),
  4288. [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
  4289. CNTR_NORMAL,
  4290. access_tx_linkdown_err_cnt),
  4291. [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
  4292. "EgressFifoUnderrunOrParityErr", 0, 0,
  4293. CNTR_NORMAL,
  4294. access_tx_egress_fifi_underrun_or_parity_err_cnt),
  4295. [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
  4296. CNTR_NORMAL,
  4297. access_egress_reserved_2_err_cnt),
  4298. [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
  4299. CNTR_NORMAL,
  4300. access_tx_pkt_integrity_mem_unc_err_cnt),
  4301. [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
  4302. CNTR_NORMAL,
  4303. access_tx_pkt_integrity_mem_cor_err_cnt),
  4304. /* SendErrStatus */
  4305. [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
  4306. CNTR_NORMAL,
  4307. access_send_csr_write_bad_addr_err_cnt),
  4308. [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
  4309. CNTR_NORMAL,
  4310. access_send_csr_read_bad_addr_err_cnt),
  4311. [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
  4312. CNTR_NORMAL,
  4313. access_send_csr_parity_cnt),
  4314. /* SendCtxtErrStatus */
  4315. [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
  4316. CNTR_NORMAL,
  4317. access_pio_write_out_of_bounds_err_cnt),
  4318. [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
  4319. CNTR_NORMAL,
  4320. access_pio_write_overflow_err_cnt),
  4321. [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
  4322. 0, 0, CNTR_NORMAL,
  4323. access_pio_write_crosses_boundary_err_cnt),
  4324. [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
  4325. CNTR_NORMAL,
  4326. access_pio_disallowed_packet_err_cnt),
  4327. [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
  4328. CNTR_NORMAL,
  4329. access_pio_inconsistent_sop_err_cnt),
  4330. /* SendDmaEngErrStatus */
  4331. [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
  4332. 0, 0, CNTR_NORMAL,
  4333. access_sdma_header_request_fifo_cor_err_cnt),
  4334. [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
  4335. CNTR_NORMAL,
  4336. access_sdma_header_storage_cor_err_cnt),
  4337. [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
  4338. CNTR_NORMAL,
  4339. access_sdma_packet_tracking_cor_err_cnt),
  4340. [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
  4341. CNTR_NORMAL,
  4342. access_sdma_assembly_cor_err_cnt),
  4343. [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
  4344. CNTR_NORMAL,
  4345. access_sdma_desc_table_cor_err_cnt),
  4346. [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
  4347. 0, 0, CNTR_NORMAL,
  4348. access_sdma_header_request_fifo_unc_err_cnt),
  4349. [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
  4350. CNTR_NORMAL,
  4351. access_sdma_header_storage_unc_err_cnt),
  4352. [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
  4353. CNTR_NORMAL,
  4354. access_sdma_packet_tracking_unc_err_cnt),
  4355. [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
  4356. CNTR_NORMAL,
  4357. access_sdma_assembly_unc_err_cnt),
  4358. [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
  4359. CNTR_NORMAL,
  4360. access_sdma_desc_table_unc_err_cnt),
  4361. [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
  4362. CNTR_NORMAL,
  4363. access_sdma_timeout_err_cnt),
  4364. [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
  4365. CNTR_NORMAL,
  4366. access_sdma_header_length_err_cnt),
  4367. [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
  4368. CNTR_NORMAL,
  4369. access_sdma_header_address_err_cnt),
  4370. [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
  4371. CNTR_NORMAL,
  4372. access_sdma_header_select_err_cnt),
  4373. [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
  4374. CNTR_NORMAL,
  4375. access_sdma_reserved_9_err_cnt),
  4376. [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
  4377. CNTR_NORMAL,
  4378. access_sdma_packet_desc_overflow_err_cnt),
  4379. [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
  4380. CNTR_NORMAL,
  4381. access_sdma_length_mismatch_err_cnt),
  4382. [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
  4383. CNTR_NORMAL,
  4384. access_sdma_halt_err_cnt),
  4385. [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
  4386. CNTR_NORMAL,
  4387. access_sdma_mem_read_err_cnt),
  4388. [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
  4389. CNTR_NORMAL,
  4390. access_sdma_first_desc_err_cnt),
  4391. [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
  4392. CNTR_NORMAL,
  4393. access_sdma_tail_out_of_bounds_err_cnt),
  4394. [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
  4395. CNTR_NORMAL,
  4396. access_sdma_too_long_err_cnt),
  4397. [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
  4398. CNTR_NORMAL,
  4399. access_sdma_gen_mismatch_err_cnt),
  4400. [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
  4401. CNTR_NORMAL,
  4402. access_sdma_wrong_dw_err_cnt),
  4403. };
  4404. static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
  4405. [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
  4406. CNTR_NORMAL),
  4407. [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
  4408. CNTR_NORMAL),
  4409. [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
  4410. CNTR_NORMAL),
  4411. [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
  4412. CNTR_NORMAL),
  4413. [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
  4414. CNTR_NORMAL),
  4415. [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
  4416. CNTR_NORMAL),
  4417. [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
  4418. CNTR_NORMAL),
  4419. [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
  4420. [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
  4421. [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
  4422. [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
  4423. CNTR_SYNTH | CNTR_VL),
  4424. [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
  4425. CNTR_SYNTH | CNTR_VL),
  4426. [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
  4427. CNTR_SYNTH | CNTR_VL),
  4428. [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
  4429. [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
  4430. [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
  4431. access_sw_link_dn_cnt),
  4432. [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
  4433. access_sw_link_up_cnt),
  4434. [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
  4435. access_sw_unknown_frame_cnt),
  4436. [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
  4437. access_sw_xmit_discards),
  4438. [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
  4439. CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
  4440. access_sw_xmit_discards),
  4441. [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
  4442. access_xmit_constraint_errs),
  4443. [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
  4444. access_rcv_constraint_errs),
  4445. [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
  4446. [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
  4447. [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
  4448. [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
  4449. [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
  4450. [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
  4451. [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
  4452. [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
  4453. [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
  4454. [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
  4455. [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
  4456. [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
  4457. [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
  4458. access_sw_cpu_rc_acks),
  4459. [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
  4460. access_sw_cpu_rc_qacks),
  4461. [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
  4462. access_sw_cpu_rc_delayed_comp),
  4463. [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
  4464. [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
  4465. [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
  4466. [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
  4467. [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
  4468. [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
  4469. [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
  4470. [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
  4471. [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
  4472. [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
  4473. [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
  4474. [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
  4475. [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
  4476. [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
  4477. [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
  4478. [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
  4479. [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
  4480. [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
  4481. [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
  4482. [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
  4483. [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
  4484. [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
  4485. [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
  4486. [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
  4487. [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
  4488. [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
  4489. [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
  4490. [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
  4491. [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
  4492. [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
  4493. [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
  4494. [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
  4495. [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
  4496. [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
  4497. [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
  4498. [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
  4499. [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
  4500. [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
  4501. [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
  4502. [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
  4503. [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
  4504. [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
  4505. [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
  4506. [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
  4507. [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
  4508. [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
  4509. [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
  4510. [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
  4511. [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
  4512. [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
  4513. [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
  4514. [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
  4515. [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
  4516. [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
  4517. [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
  4518. [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
  4519. [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
  4520. [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
  4521. [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
  4522. [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
  4523. [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
  4524. [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
  4525. [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
  4526. [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
  4527. [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
  4528. [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
  4529. [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
  4530. [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
  4531. [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
  4532. [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
  4533. [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
  4534. [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
  4535. [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
  4536. [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
  4537. [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
  4538. [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
  4539. [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
  4540. [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
  4541. [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
  4542. [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
  4543. };
/* ======================================================================== */

/* return true if this is chip revision A */
int is_ax(struct hfi1_devdata *dd)
{
        u8 chip_rev_minor =
                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
        return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision B */
int is_bx(struct hfi1_devdata *dd)
{
        u8 chip_rev_minor =
                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
        return (chip_rev_minor & 0xF0) == 0x10;
}
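/*
 * Note: both helpers key off the upper nibble of the minor chip revision,
 * so any value of the form 0x0n reports the part as an A revision
 * (is_ax()) and 0x1n as a B revision (is_bx()).
 */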
/*
 * Append string s to buffer buf.  Arguments curp and lenp are the current
 * position and remaining length, respectively.
 *
 * return 0 on success, 1 on out of room
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
        char *p = *curp;
        int len = *lenp;
        int result = 0; /* success */
        char c;

        /* add a comma, if this is not the first string in the buffer */
        if (p != buf) {
                if (len == 0) {
                        result = 1; /* out of room */
                        goto done;
                }
                *p++ = ',';
                len--;
        }

        /* copy the string */
        while ((c = *s++) != 0) {
                if (len == 0) {
                        result = 1; /* out of room */
                        goto done;
                }
                *p++ = c;
                len--;
        }

done:
        /* write return values */
        *curp = p;
        *lenp = len;
        return result;
}
/*
 * Using the given flag table, print a comma separated string into
 * the buffer.  End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
                         struct flag_table *table, int table_size)
{
        char extra[32];
        char *p = buf;
        int len = buf_len;
        int no_room = 0;
        int i;

        /* make sure there are at least 2 bytes so we can form "*" */
        if (len < 2)
                return "";

        len--; /* leave room for a nul */
        for (i = 0; i < table_size; i++) {
                if (flags & table[i].flag) {
                        no_room = append_str(buf, &p, &len, table[i].str);
                        if (no_room)
                                break;
                        flags &= ~table[i].flag;
                }
        }

        /* any undocumented bits left? */
        if (!no_room && flags) {
                snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
                no_room = append_str(buf, &p, &len, extra);
        }

        /* add * if ran out of room */
        if (no_room) {
                /* may need to back up to add space for a '*' */
                if (len == 0)
                        --p;
                *p++ = '*';
        }

        /* add final nul - space already allocated above */
        *p = 0;
        return buf;
}
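/*
 * Example (illustrative names only): with a table mapping bit 0 to
 * "FooErr" and bit 1 to "BarErr", flags == 0x7 formats as
 * "FooErr,BarErr,bits 0x4".  If the buffer is too small to hold the
 * full string, the output is truncated and ends in '*'.
 */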
/* first 8 CCE error interrupt source names */
static const char * const cce_misc_names[] = {
        "CceErrInt",    /* 0 */
        "RxeErrInt",    /* 1 */
        "MiscErrInt",   /* 2 */
        "Reserved3",    /* 3 */
        "PioErrInt",    /* 4 */
        "SDmaErrInt",   /* 5 */
        "EgressErrInt", /* 6 */
        "TxeErrInt"     /* 7 */
};
/*
 * Return the miscellaneous error interrupt name.
 */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
        if (source < ARRAY_SIZE(cce_misc_names))
                strncpy(buf, cce_misc_names[source], bsize);
        else
                snprintf(buf, bsize, "Reserved%u",
                         source + IS_GENERAL_ERR_START);

        return buf;
}

/*
 * Return the SDMA engine error interrupt name.
 */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
{
        snprintf(buf, bsize, "SDmaEngErrInt%u", source);
        return buf;
}

/*
 * Return the send context error interrupt name.
 */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
{
        snprintf(buf, bsize, "SendCtxtErrInt%u", source);
        return buf;
}
static const char * const various_names[] = {
        "PbcInt",
        "GpioAssertInt",
        "Qsfp1Int",
        "Qsfp2Int",
        "TCritInt"
};

/*
 * Return the various interrupt name.
 */
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
{
        if (source < ARRAY_SIZE(various_names))
                strncpy(buf, various_names[source], bsize);
        else
                snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
        return buf;
}
/*
 * Return the DC interrupt name.
 */
static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
{
        static const char * const dc_int_names[] = {
                "common",
                "lcb",
                "8051",
                "lbm"   /* local block merge */
        };

        if (source < ARRAY_SIZE(dc_int_names))
                snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
        else
                snprintf(buf, bsize, "DCInt%u", source);
        return buf;
}
static const char * const sdma_int_names[] = {
        "SDmaInt",
        "SdmaIdleInt",
        "SdmaProgressInt",
};

/*
 * Return the SDMA engine interrupt name.
 */
static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
{
        /* what interrupt */
        unsigned int what = source / TXE_NUM_SDMA_ENGINES;
        /* which engine */
        unsigned int which = source % TXE_NUM_SDMA_ENGINES;

        if (likely(what < 3))
                snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
        else
                snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
        return buf;
}
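/*
 * Example (assuming the usual 16 SDMA engines per TXE): source 0 decodes
 * to "SDmaInt0", source 16 to "SdmaIdleInt0", and source 33 to
 * "SdmaProgressInt1" - the quotient selects the interrupt type and the
 * remainder selects the engine.
 */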
/*
 * Return the receive available interrupt name.
 */
static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
{
        snprintf(buf, bsize, "RcvAvailInt%u", source);
        return buf;
}

/*
 * Return the receive urgent interrupt name.
 */
static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
{
        snprintf(buf, bsize, "RcvUrgentInt%u", source);
        return buf;
}

/*
 * Return the send credit interrupt name.
 */
static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
{
        snprintf(buf, bsize, "SendCreditInt%u", source);
        return buf;
}

/*
 * Return the reserved interrupt name.
 */
static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
{
        snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
        return buf;
}
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
        return flag_string(buf, buf_len, flags,
                           cce_err_status_flags,
                           ARRAY_SIZE(cce_err_status_flags));
}

static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
        return flag_string(buf, buf_len, flags,
                           rxe_err_status_flags,
                           ARRAY_SIZE(rxe_err_status_flags));
}

static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
        return flag_string(buf, buf_len, flags, misc_err_status_flags,
                           ARRAY_SIZE(misc_err_status_flags));
}

static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
        return flag_string(buf, buf_len, flags,
                           pio_err_status_flags,
                           ARRAY_SIZE(pio_err_status_flags));
}

static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
        return flag_string(buf, buf_len, flags,
                           sdma_err_status_flags,
                           ARRAY_SIZE(sdma_err_status_flags));
}

static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
        return flag_string(buf, buf_len, flags,
                           egress_err_status_flags,
                           ARRAY_SIZE(egress_err_status_flags));
}

static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
        return flag_string(buf, buf_len, flags,
                           egress_err_info_flags,
                           ARRAY_SIZE(egress_err_info_flags));
}

static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
        return flag_string(buf, buf_len, flags,
                           send_err_status_flags,
                           ARRAY_SIZE(send_err_status_flags));
}
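/*
 * Each wrapper above simply pairs one of the *_err_status_flags (or
 * egress_err_info_flags) tables with its ARRAY_SIZE() so the interrupt
 * handlers below can format a raw register value in a single call.
 */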
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
        char buf[96];
        int i = 0;

        /*
         * For most of these errors, there is nothing that can be done except
         * report or record it.
         */
        dd_dev_info(dd, "CCE Error: %s\n",
                    cce_err_status_string(buf, sizeof(buf), reg));

        if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
            is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
                /* this error requires a manual drop into SPC freeze mode */
                /* then a fix up */
                start_freeze_handling(dd->pport, FREEZE_SELF);
        }

        for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
                if (reg & (1ull << i)) {
                        incr_cntr64(&dd->cce_err_status_cnt[i]);
                        /* maintain a counter over all cce_err_status errors */
                        incr_cntr64(&dd->sw_cce_err_status_aggregate);
                }
        }
}
/*
 * Check counters for receive errors that do not have an interrupt
 * associated with them.
 */
#define RCVERR_CHECK_TIME 10
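/*
 * The check interval is in seconds: update_rcverr_timer() below rearms
 * itself with mod_timer(..., jiffies + HZ * RCVERR_CHECK_TIME), so the
 * receive overflow counter is polled roughly every 10 seconds.
 */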
static void update_rcverr_timer(struct timer_list *t)
{
        struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
        struct hfi1_pportdata *ppd = dd->pport;
        u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);

        if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
            ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
                dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
                set_link_down_reason(
                        ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
                        OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
                queue_work(ppd->link_wq, &ppd->link_bounce_work);
        }
        dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;

        mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static int init_rcverr(struct hfi1_devdata *dd)
{
        timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
        /* Assume the hardware counter has been reset */
        dd->rcv_ovfl_cnt = 0;
        return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static void free_rcverr(struct hfi1_devdata *dd)
{
        if (dd->rcverr_timer.function)
                del_timer_sync(&dd->rcverr_timer);
}
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
        char buf[96];
        int i = 0;

        dd_dev_info(dd, "Receive Error: %s\n",
                    rxe_err_status_string(buf, sizeof(buf), reg));

        if (reg & ALL_RXE_FREEZE_ERR) {
                int flags = 0;

                /*
                 * Freeze mode recovery is disabled for the errors
                 * in RXE_FREEZE_ABORT_MASK
                 */
                if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
                        flags = FREEZE_ABORT;

                start_freeze_handling(dd->pport, flags);
        }

        for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
                if (reg & (1ull << i))
                        incr_cntr64(&dd->rcv_err_status_cnt[i]);
        }
}
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
        char buf[96];
        int i = 0;

        dd_dev_info(dd, "Misc Error: %s",
                    misc_err_status_string(buf, sizeof(buf), reg));
        for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
                if (reg & (1ull << i))
                        incr_cntr64(&dd->misc_err_status_cnt[i]);
        }
}
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
        char buf[96];
        int i = 0;

        dd_dev_info(dd, "PIO Error: %s\n",
                    pio_err_status_string(buf, sizeof(buf), reg));

        if (reg & ALL_PIO_FREEZE_ERR)
                start_freeze_handling(dd->pport, 0);

        for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
                if (reg & (1ull << i))
                        incr_cntr64(&dd->send_pio_err_status_cnt[i]);
        }
}

static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
        char buf[96];
        int i = 0;

        dd_dev_info(dd, "SDMA Error: %s\n",
                    sdma_err_status_string(buf, sizeof(buf), reg));

        if (reg & ALL_SDMA_FREEZE_ERR)
                start_freeze_handling(dd->pport, 0);

        for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
                if (reg & (1ull << i))
                        incr_cntr64(&dd->send_dma_err_status_cnt[i]);
        }
}
static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
        incr_cntr64(&ppd->port_xmit_discards);
}

static void count_port_inactive(struct hfi1_devdata *dd)
{
        __count_port_discards(dd->pport);
}
/*
 * We have had a "disallowed packet" error during egress.  Determine the
 * integrity check which failed, and update the relevant error counter, etc.
 *
 * Note that the SEND_EGRESS_ERR_INFO register has only a single
 * bit of state per integrity check, and so we can miss the reason for an
 * egress error if more than one packet fails the same integrity check
 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
                                        int vl)
{
        struct hfi1_pportdata *ppd = dd->pport;
        u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
        u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
        char buf[96];

        /* clear down all observed info as quickly as possible after read */
        write_csr(dd, SEND_EGRESS_ERR_INFO, info);

        dd_dev_info(dd,
                    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
                    info, egress_err_info_string(buf, sizeof(buf), info), src);

        /* Eventually add other counters for each bit */
        if (info & PORT_DISCARD_EGRESS_ERRS) {
                int weight, i;

                /*
                 * Count all applicable bits as individual errors and
                 * attribute them to the packet that triggered this handler.
                 * This may not be completely accurate due to limitations
                 * on the available hardware error information.  There is
                 * a single information register and any number of error
                 * packets may have occurred and contributed to it before
                 * this routine is called.  This means that:
                 * a) If multiple packets with the same error occur before
                 *    this routine is called, earlier packets are missed.
                 *    There is only a single bit for each error type.
                 * b) Errors may not be attributed to the correct VL.
                 *    The driver is attributing all bits in the info register
                 *    to the packet that triggered this call, but bits
                 *    could be an accumulation of different packets with
                 *    different VLs.
                 * c) A single error packet may have multiple counts attached
                 *    to it.  There is no way for the driver to know if
                 *    multiple bits set in the info register are due to a
                 *    single packet or multiple packets.  The driver assumes
                 *    multiple packets.
                 */
                weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
                for (i = 0; i < weight; i++) {
                        __count_port_discards(ppd);
                        if (vl >= 0 && vl < TXE_NUM_DATA_VL)
                                incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
                        else if (vl == 15)
                                incr_cntr64(&ppd->port_xmit_discards_vl
                                            [C_VL_15]);
                }
        }
}
  4990. /*
  4991. * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
  4992. * register. Does it represent a 'port inactive' error?
  4993. */
  4994. static inline int port_inactive_err(u64 posn)
  4995. {
  4996. return (posn >= SEES(TX_LINKDOWN) &&
  4997. posn <= SEES(TX_INCORRECT_LINK_STATE));
  4998. }
  4999. /*
  5000. * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
  5001. * register. Does it represent a 'disallowed packet' error?
  5002. */
  5003. static inline int disallowed_pkt_err(int posn)
  5004. {
  5005. return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
  5006. posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
  5007. }
  5008. /*
  5009. * Input value is a bit position of one of the SDMA engine disallowed
  5010. * packet errors. Return which engine. Use of this must be guarded by
  5011. * disallowed_pkt_err().
  5012. */
  5013. static inline int disallowed_pkt_engine(int posn)
  5014. {
  5015. return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
  5016. }
  5017. /*
5018. * Translate an SDMA engine to a VL. Return -1 if the translation cannot
  5019. * be done.
  5020. */
  5021. static int engine_to_vl(struct hfi1_devdata *dd, int engine)
  5022. {
  5023. struct sdma_vl_map *m;
  5024. int vl;
  5025. /* range check */
  5026. if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
  5027. return -1;
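/* the engine-to-VL map is RCU protected; look it up under the read lock */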
  5028. rcu_read_lock();
  5029. m = rcu_dereference(dd->sdma_map);
  5030. vl = m->engine_to_vl[engine];
  5031. rcu_read_unlock();
  5032. return vl;
  5033. }
  5034. /*
5035. * Translate the send context (software index) into a VL. Return -1 if the
  5036. * translation cannot be done.
  5037. */
  5038. static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
  5039. {
  5040. struct send_context_info *sci;
  5041. struct send_context *sc;
  5042. int i;
  5043. sci = &dd->send_contexts[sw_index];
  5044. /* there is no information for user (PSM) and ack contexts */
  5045. if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
  5046. return -1;
  5047. sc = sci->sc;
  5048. if (!sc)
  5049. return -1;
  5050. if (dd->vld[15].sc == sc)
  5051. return 15;
  5052. for (i = 0; i < num_vls; i++)
  5053. if (dd->vld[i].sc == sc)
  5054. return i;
  5055. return -1;
  5056. }
  5057. static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  5058. {
  5059. u64 reg_copy = reg, handled = 0;
  5060. char buf[96];
  5061. int i = 0;
  5062. if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
  5063. start_freeze_handling(dd->pport, 0);
  5064. else if (is_ax(dd) &&
  5065. (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
  5066. (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
  5067. start_freeze_handling(dd->pport, 0);
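/*
 * Walk the set status bits from most to least significant. Bits that map
 * to "port inactive" or "disallowed packet" errors get their own accounting
 * here; anything not handled is reported and counted generically below.
 */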
  5068. while (reg_copy) {
  5069. int posn = fls64(reg_copy);
  5070. /* fls64() returns a 1-based offset, we want it zero based */
  5071. int shift = posn - 1;
  5072. u64 mask = 1ULL << shift;
  5073. if (port_inactive_err(shift)) {
  5074. count_port_inactive(dd);
  5075. handled |= mask;
  5076. } else if (disallowed_pkt_err(shift)) {
  5077. int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
  5078. handle_send_egress_err_info(dd, vl);
  5079. handled |= mask;
  5080. }
  5081. reg_copy &= ~mask;
  5082. }
  5083. reg &= ~handled;
  5084. if (reg)
  5085. dd_dev_info(dd, "Egress Error: %s\n",
  5086. egress_err_status_string(buf, sizeof(buf), reg));
  5087. for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
  5088. if (reg & (1ull << i))
  5089. incr_cntr64(&dd->send_egress_err_status_cnt[i]);
  5090. }
  5091. }
  5092. static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  5093. {
  5094. char buf[96];
  5095. int i = 0;
  5096. dd_dev_info(dd, "Send Error: %s\n",
  5097. send_err_status_string(buf, sizeof(buf), reg));
  5098. for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
  5099. if (reg & (1ull << i))
  5100. incr_cntr64(&dd->send_err_status_cnt[i]);
  5101. }
  5102. }
  5103. /*
  5104. * The maximum number of times the error clear down will loop before
  5105. * blocking a repeating error. This value is arbitrary.
  5106. */
  5107. #define MAX_CLEAR_COUNT 20
  5108. /*
  5109. * Clear and handle an error register. All error interrupts are funneled
  5110. * through here to have a central location to correctly handle single-
  5111. * or multi-shot errors.
  5112. *
  5113. * For non per-context registers, call this routine with a context value
  5114. * of 0 so the per-context offset is zero.
  5115. *
  5116. * If the handler loops too many times, assume that something is wrong
  5117. * and can't be fixed, so mask the error bits.
  5118. */
  5119. static void interrupt_clear_down(struct hfi1_devdata *dd,
  5120. u32 context,
  5121. const struct err_reg_info *eri)
  5122. {
  5123. u64 reg;
  5124. u32 count;
  5125. /* read in a loop until no more errors are seen */
  5126. count = 0;
  5127. while (1) {
  5128. reg = read_kctxt_csr(dd, context, eri->status);
  5129. if (reg == 0)
  5130. break;
  5131. write_kctxt_csr(dd, context, eri->clear, reg);
  5132. if (likely(eri->handler))
  5133. eri->handler(dd, context, reg);
  5134. count++;
  5135. if (count > MAX_CLEAR_COUNT) {
  5136. u64 mask;
  5137. dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
  5138. eri->desc, reg);
  5139. /*
  5140. * Read-modify-write so any other masked bits
  5141. * remain masked.
  5142. */
  5143. mask = read_kctxt_csr(dd, context, eri->mask);
  5144. mask &= ~reg;
  5145. write_kctxt_csr(dd, context, eri->mask, mask);
  5146. break;
  5147. }
  5148. }
  5149. }
  5150. /*
  5151. * CCE block "misc" interrupt. Source is < 16.
  5152. */
  5153. static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
  5154. {
  5155. const struct err_reg_info *eri = &misc_errs[source];
  5156. if (eri->handler) {
  5157. interrupt_clear_down(dd, 0, eri);
  5158. } else {
  5159. dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
  5160. source);
  5161. }
  5162. }
  5163. static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
  5164. {
  5165. return flag_string(buf, buf_len, flags,
  5166. sc_err_status_flags,
  5167. ARRAY_SIZE(sc_err_status_flags));
  5168. }
  5169. /*
  5170. * Send context error interrupt. Source (hw_context) is < 160.
  5171. *
  5172. * All send context errors cause the send context to halt. The normal
  5173. * clear-down mechanism cannot be used because we cannot clear the
  5174. * error bits until several other long-running items are done first.
  5175. * This is OK because with the context halted, nothing else is going
  5176. * to happen on it anyway.
  5177. */
  5178. static void is_sendctxt_err_int(struct hfi1_devdata *dd,
  5179. unsigned int hw_context)
  5180. {
  5181. struct send_context_info *sci;
  5182. struct send_context *sc;
  5183. char flags[96];
  5184. u64 status;
  5185. u32 sw_index;
  5186. int i = 0;
  5187. unsigned long irq_flags;
  5188. sw_index = dd->hw_to_sw[hw_context];
  5189. if (sw_index >= dd->num_send_contexts) {
  5190. dd_dev_err(dd,
  5191. "out of range sw index %u for send context %u\n",
  5192. sw_index, hw_context);
  5193. return;
  5194. }
  5195. sci = &dd->send_contexts[sw_index];
  5196. spin_lock_irqsave(&dd->sc_lock, irq_flags);
  5197. sc = sci->sc;
  5198. if (!sc) {
  5199. dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
  5200. sw_index, hw_context);
  5201. spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
  5202. return;
  5203. }
  5204. /* tell the software that a halt has begun */
  5205. sc_stop(sc, SCF_HALTED);
  5206. status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
  5207. dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
  5208. send_context_err_status_string(flags, sizeof(flags),
  5209. status));
  5210. if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
  5211. handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
  5212. /*
  5213. * Automatically restart halted kernel contexts out of interrupt
  5214. * context. User contexts must ask the driver to restart the context.
  5215. */
  5216. if (sc->type != SC_USER)
  5217. queue_work(dd->pport->hfi1_wq, &sc->halt_work);
  5218. spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
  5219. /*
  5220. * Update the counters for the corresponding status bits.
  5221. * Note that these particular counters are aggregated over all
  5222. * 160 contexts.
  5223. */
  5224. for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
  5225. if (status & (1ull << i))
  5226. incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
  5227. }
  5228. }
  5229. static void handle_sdma_eng_err(struct hfi1_devdata *dd,
  5230. unsigned int source, u64 status)
  5231. {
  5232. struct sdma_engine *sde;
  5233. int i = 0;
  5234. sde = &dd->per_sdma[source];
  5235. #ifdef CONFIG_SDMA_VERBOSITY
  5236. dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
  5237. slashstrip(__FILE__), __LINE__, __func__);
  5238. dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
  5239. sde->this_idx, source, (unsigned long long)status);
  5240. #endif
  5241. sde->err_cnt++;
  5242. sdma_engine_error(sde, status);
  5243. /*
  5244. * Update the counters for the corresponding status bits.
  5245. * Note that these particular counters are aggregated over
  5246. * all 16 DMA engines.
  5247. */
  5248. for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
  5249. if (status & (1ull << i))
  5250. incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
  5251. }
  5252. }
  5253. /*
  5254. * CCE block SDMA error interrupt. Source is < 16.
  5255. */
  5256. static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
  5257. {
  5258. #ifdef CONFIG_SDMA_VERBOSITY
  5259. struct sdma_engine *sde = &dd->per_sdma[source];
  5260. dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
  5261. slashstrip(__FILE__), __LINE__, __func__);
  5262. dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
  5263. source);
  5264. sdma_dumpstate(sde);
  5265. #endif
  5266. interrupt_clear_down(dd, source, &sdma_eng_err);
  5267. }
  5268. /*
  5269. * CCE block "various" interrupt. Source is < 8.
  5270. */
  5271. static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
  5272. {
  5273. const struct err_reg_info *eri = &various_err[source];
  5274. /*
  5275. * TCritInt cannot go through interrupt_clear_down()
  5276. * because it is not a second tier interrupt. The handler
  5277. * should be called directly.
  5278. */
  5279. if (source == TCRIT_INT_SOURCE)
  5280. handle_temp_err(dd);
  5281. else if (eri->handler)
  5282. interrupt_clear_down(dd, 0, eri);
  5283. else
  5284. dd_dev_info(dd,
  5285. "%s: Unimplemented/reserved interrupt %d\n",
  5286. __func__, source);
  5287. }
  5288. static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
  5289. {
  5290. /* src_ctx is always zero */
  5291. struct hfi1_pportdata *ppd = dd->pport;
  5292. unsigned long flags;
  5293. u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
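/* starting invert mask; the MODPRST_N bit is toggled below to track removal vs. insertion */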
  5294. if (reg & QSFP_HFI0_MODPRST_N) {
  5295. if (!qsfp_mod_present(ppd)) {
  5296. dd_dev_info(dd, "%s: QSFP module removed\n",
  5297. __func__);
  5298. ppd->driver_link_ready = 0;
  5299. /*
  5300. * Cable removed, reset all our information about the
  5301. * cache and cable capabilities
  5302. */
  5303. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  5304. /*
  5305. * We don't set cache_refresh_required here as we expect
  5306. * an interrupt when a cable is inserted
  5307. */
  5308. ppd->qsfp_info.cache_valid = 0;
  5309. ppd->qsfp_info.reset_needed = 0;
  5310. ppd->qsfp_info.limiting_active = 0;
  5311. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
  5312. flags);
  5313. /* Invert the ModPresent pin now to detect plug-in */
  5314. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
  5315. ASIC_QSFP1_INVERT, qsfp_int_mgmt);
  5316. if ((ppd->offline_disabled_reason >
  5317. HFI1_ODR_MASK(
  5318. OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
  5319. (ppd->offline_disabled_reason ==
  5320. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
  5321. ppd->offline_disabled_reason =
  5322. HFI1_ODR_MASK(
  5323. OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
  5324. if (ppd->host_link_state == HLS_DN_POLL) {
  5325. /*
  5326. * The link is still in POLL. This means
  5327. * that the normal link down processing
  5328. * will not happen. We have to do it here
  5329. * before turning the DC off.
  5330. */
  5331. queue_work(ppd->link_wq, &ppd->link_down_work);
  5332. }
  5333. } else {
  5334. dd_dev_info(dd, "%s: QSFP module inserted\n",
  5335. __func__);
  5336. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  5337. ppd->qsfp_info.cache_valid = 0;
  5338. ppd->qsfp_info.cache_refresh_required = 1;
  5339. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
  5340. flags);
  5341. /*
  5342. * Stop inversion of ModPresent pin to detect
  5343. * removal of the cable
  5344. */
  5345. qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
  5346. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
  5347. ASIC_QSFP1_INVERT, qsfp_int_mgmt);
  5348. ppd->offline_disabled_reason =
  5349. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
  5350. }
  5351. }
  5352. if (reg & QSFP_HFI0_INT_N) {
  5353. dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
  5354. __func__);
  5355. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  5356. ppd->qsfp_info.check_interrupt_flags = 1;
  5357. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
  5358. }
  5359. /* Schedule the QSFP work only if there is a cable attached. */
  5360. if (qsfp_mod_present(ppd))
  5361. queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
  5362. }
  5363. static int request_host_lcb_access(struct hfi1_devdata *dd)
  5364. {
  5365. int ret;
  5366. ret = do_8051_command(dd, HCMD_MISC,
  5367. (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
  5368. LOAD_DATA_FIELD_ID_SHIFT, NULL);
  5369. if (ret != HCMD_SUCCESS) {
  5370. dd_dev_err(dd, "%s: command failed with error %d\n",
  5371. __func__, ret);
  5372. }
  5373. return ret == HCMD_SUCCESS ? 0 : -EBUSY;
  5374. }
  5375. static int request_8051_lcb_access(struct hfi1_devdata *dd)
  5376. {
  5377. int ret;
  5378. ret = do_8051_command(dd, HCMD_MISC,
  5379. (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
  5380. LOAD_DATA_FIELD_ID_SHIFT, NULL);
  5381. if (ret != HCMD_SUCCESS) {
  5382. dd_dev_err(dd, "%s: command failed with error %d\n",
  5383. __func__, ret);
  5384. }
  5385. return ret == HCMD_SUCCESS ? 0 : -EBUSY;
  5386. }
  5387. /*
  5388. * Set the LCB selector - allow host access. The DCC selector always
  5389. * points to the host.
  5390. */
  5391. static inline void set_host_lcb_access(struct hfi1_devdata *dd)
  5392. {
  5393. write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
  5394. DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
  5395. DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
  5396. }
  5397. /*
  5398. * Clear the LCB selector - allow 8051 access. The DCC selector always
  5399. * points to the host.
  5400. */
  5401. static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
  5402. {
  5403. write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
  5404. DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
  5405. }
  5406. /*
  5407. * Acquire LCB access from the 8051. If the host already has access,
  5408. * just increment a counter. Otherwise, inform the 8051 that the
  5409. * host is taking access.
  5410. *
  5411. * Returns:
  5412. * 0 on success
  5413. * -EBUSY if the 8051 has control and cannot be disturbed
  5414. * -errno if unable to acquire access from the 8051
  5415. */
  5416. int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
  5417. {
  5418. struct hfi1_pportdata *ppd = dd->pport;
  5419. int ret = 0;
  5420. /*
  5421. * Use the host link state lock so the operation of this routine
  5422. * { link state check, selector change, count increment } can occur
  5423. * as a unit against a link state change. Otherwise there is a
  5424. * race between the state change and the count increment.
  5425. */
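/* if the caller cannot sleep, spin on the mutex with a short delay instead of blocking */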
  5426. if (sleep_ok) {
  5427. mutex_lock(&ppd->hls_lock);
  5428. } else {
  5429. while (!mutex_trylock(&ppd->hls_lock))
  5430. udelay(1);
  5431. }
  5432. /* this access is valid only when the link is up */
  5433. if (ppd->host_link_state & HLS_DOWN) {
  5434. dd_dev_info(dd, "%s: link state %s not up\n",
  5435. __func__, link_state_name(ppd->host_link_state));
  5436. ret = -EBUSY;
  5437. goto done;
  5438. }
  5439. if (dd->lcb_access_count == 0) {
  5440. ret = request_host_lcb_access(dd);
  5441. if (ret) {
  5442. dd_dev_err(dd,
  5443. "%s: unable to acquire LCB access, err %d\n",
  5444. __func__, ret);
  5445. goto done;
  5446. }
  5447. set_host_lcb_access(dd);
  5448. }
  5449. dd->lcb_access_count++;
  5450. done:
  5451. mutex_unlock(&ppd->hls_lock);
  5452. return ret;
  5453. }
  5454. /*
  5455. * Release LCB access by decrementing the use count. If the count is moving
  5456. * from 1 to 0, inform 8051 that it has control back.
  5457. *
  5458. * Returns:
  5459. * 0 on success
  5460. * -errno if unable to release access to the 8051
  5461. */
  5462. int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
  5463. {
  5464. int ret = 0;
  5465. /*
  5466. * Use the host link state lock because the acquire needed it.
  5467. * Here, we only need to keep { selector change, count decrement }
  5468. * as a unit.
  5469. */
  5470. if (sleep_ok) {
  5471. mutex_lock(&dd->pport->hls_lock);
  5472. } else {
  5473. while (!mutex_trylock(&dd->pport->hls_lock))
  5474. udelay(1);
  5475. }
  5476. if (dd->lcb_access_count == 0) {
  5477. dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
  5478. __func__);
  5479. goto done;
  5480. }
  5481. if (dd->lcb_access_count == 1) {
  5482. set_8051_lcb_access(dd);
  5483. ret = request_8051_lcb_access(dd);
  5484. if (ret) {
  5485. dd_dev_err(dd,
  5486. "%s: unable to release LCB access, err %d\n",
  5487. __func__, ret);
  5488. /* restore host access if the grant didn't work */
  5489. set_host_lcb_access(dd);
  5490. goto done;
  5491. }
  5492. }
  5493. dd->lcb_access_count--;
  5494. done:
  5495. mutex_unlock(&dd->pport->hls_lock);
  5496. return ret;
  5497. }
  5498. /*
  5499. * Initialize LCB access variables and state. Called during driver load,
  5500. * after most of the initialization is finished.
  5501. *
  5502. * The DC default is LCB access on for the host. The driver defaults to
  5503. * leaving access to the 8051. Assign access now - this constrains the call
  5504. * to this routine to be after all LCB set-up is done. In particular, after
5505. * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
  5506. */
  5507. static void init_lcb_access(struct hfi1_devdata *dd)
  5508. {
  5509. dd->lcb_access_count = 0;
  5510. }
  5511. /*
5512. * Write a response back to an 8051 request.
  5513. */
  5514. static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
  5515. {
  5516. write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
  5517. DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
  5518. (u64)return_code <<
  5519. DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
  5520. (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
  5521. }
  5522. /*
  5523. * Handle host requests from the 8051.
  5524. */
  5525. static void handle_8051_request(struct hfi1_pportdata *ppd)
  5526. {
  5527. struct hfi1_devdata *dd = ppd->dd;
  5528. u64 reg;
  5529. u16 data = 0;
  5530. u8 type;
  5531. reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
  5532. if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
  5533. return; /* no request */
  5534. /* zero out COMPLETED so the response is seen */
  5535. write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
  5536. /* extract request details */
  5537. type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
  5538. & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
  5539. data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
  5540. & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
  5541. switch (type) {
  5542. case HREQ_LOAD_CONFIG:
  5543. case HREQ_SAVE_CONFIG:
  5544. case HREQ_READ_CONFIG:
  5545. case HREQ_SET_TX_EQ_ABS:
  5546. case HREQ_SET_TX_EQ_REL:
  5547. case HREQ_ENABLE:
  5548. dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
  5549. type);
  5550. hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
  5551. break;
  5552. case HREQ_LCB_RESET:
  5553. /* Put the LCB, RX FPE and TX FPE into reset */
  5554. write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
  5555. /* Make sure the write completed */
  5556. (void)read_csr(dd, DCC_CFG_RESET);
  5557. /* Hold the reset long enough to take effect */
  5558. udelay(1);
  5559. /* Take the LCB, RX FPE and TX FPE out of reset */
  5560. write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
  5561. hreq_response(dd, HREQ_SUCCESS, 0);
  5562. break;
  5563. case HREQ_CONFIG_DONE:
  5564. hreq_response(dd, HREQ_SUCCESS, 0);
  5565. break;
  5566. case HREQ_INTERFACE_TEST:
  5567. hreq_response(dd, HREQ_SUCCESS, data);
  5568. break;
  5569. default:
  5570. dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
  5571. hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
  5572. break;
  5573. }
  5574. }
  5575. /*
5576. * Set up the allocation unit value.
  5577. */
  5578. void set_up_vau(struct hfi1_devdata *dd, u8 vau)
  5579. {
  5580. u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
  5581. /* do not modify other values in the register */
  5582. reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
  5583. reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
  5584. write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
  5585. }
  5586. /*
  5587. * Set up initial VL15 credits of the remote. Assumes the rest of
  5588. * the CM credit registers are zero from a previous global or credit reset.
  5589. * Shared limit for VL15 will always be 0.
  5590. */
  5591. void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
  5592. {
  5593. u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
  5594. /* set initial values for total and shared credit limit */
  5595. reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
  5596. SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
  5597. /*
  5598. * Set total limit to be equal to VL15 credits.
  5599. * Leave shared limit at 0.
  5600. */
  5601. reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
  5602. write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
  5603. write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
  5604. << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
  5605. }
  5606. /*
  5607. * Zero all credit details from the previous connection and
  5608. * reset the CM manager's internal counters.
  5609. */
  5610. void reset_link_credits(struct hfi1_devdata *dd)
  5611. {
  5612. int i;
  5613. /* remove all previous VL credit limits */
  5614. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  5615. write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
  5616. write_csr(dd, SEND_CM_CREDIT_VL15, 0);
  5617. write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
  5618. /* reset the CM block */
  5619. pio_send_control(dd, PSC_CM_RESET);
  5620. /* reset cached value */
  5621. dd->vl15buf_cached = 0;
  5622. }
  5623. /* convert a vCU to a CU */
  5624. static u32 vcu_to_cu(u8 vcu)
  5625. {
  5626. return 1 << vcu;
  5627. }
  5628. /* convert a CU to a vCU */
  5629. static u8 cu_to_vcu(u32 cu)
  5630. {
  5631. return ilog2(cu);
  5632. }
  5633. /* convert a vAU to an AU */
  5634. static u32 vau_to_au(u8 vau)
  5635. {
  5636. return 8 * (1 << vau);
  5637. }
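/* e.g. a vCU of 2 is a CU of 4; a vAU of 3 is an AU of 64 bytes (8 << 3) */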
  5638. static void set_linkup_defaults(struct hfi1_pportdata *ppd)
  5639. {
  5640. ppd->sm_trap_qp = 0x0;
  5641. ppd->sa_qp = 0x1;
  5642. }
  5643. /*
  5644. * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
  5645. */
  5646. static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
  5647. {
  5648. u64 reg;
  5649. /* clear lcb run: LCB_CFG_RUN.EN = 0 */
  5650. write_csr(dd, DC_LCB_CFG_RUN, 0);
  5651. /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
  5652. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
  5653. 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
  5654. /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
  5655. dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
  5656. reg = read_csr(dd, DCC_CFG_RESET);
  5657. write_csr(dd, DCC_CFG_RESET, reg |
  5658. DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
  5659. (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
  5660. if (!abort) {
5661. udelay(1); /* must hold for the longer of 16 CCLKs or 20ns */
  5662. write_csr(dd, DCC_CFG_RESET, reg);
  5663. write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
  5664. }
  5665. }
  5666. /*
  5667. * This routine should be called after the link has been transitioned to
  5668. * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
  5669. * reset).
  5670. *
  5671. * The expectation is that the caller of this routine would have taken
  5672. * care of properly transitioning the link into the correct state.
  5673. * NOTE: the caller needs to acquire the dd->dc8051_lock lock
  5674. * before calling this function.
  5675. */
  5676. static void _dc_shutdown(struct hfi1_devdata *dd)
  5677. {
  5678. lockdep_assert_held(&dd->dc8051_lock);
  5679. if (dd->dc_shutdown)
  5680. return;
  5681. dd->dc_shutdown = 1;
  5682. /* Shutdown the LCB */
  5683. lcb_shutdown(dd, 1);
  5684. /*
5685. * Going to OFFLINE would have caused the 8051 to put the
5686. * SerDes into reset already. Just need to shut down the 8051
5687. * itself.
  5688. */
  5689. write_csr(dd, DC_DC8051_CFG_RST, 0x1);
  5690. }
  5691. static void dc_shutdown(struct hfi1_devdata *dd)
  5692. {
  5693. mutex_lock(&dd->dc8051_lock);
  5694. _dc_shutdown(dd);
  5695. mutex_unlock(&dd->dc8051_lock);
  5696. }
  5697. /*
  5698. * Calling this after the DC has been brought out of reset should not
  5699. * do any damage.
  5700. * NOTE: the caller needs to acquire the dd->dc8051_lock lock
  5701. * before calling this function.
  5702. */
  5703. static void _dc_start(struct hfi1_devdata *dd)
  5704. {
  5705. lockdep_assert_held(&dd->dc8051_lock);
  5706. if (!dd->dc_shutdown)
  5707. return;
  5708. /* Take the 8051 out of reset */
  5709. write_csr(dd, DC_DC8051_CFG_RST, 0ull);
  5710. /* Wait until 8051 is ready */
  5711. if (wait_fm_ready(dd, TIMEOUT_8051_START))
  5712. dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
  5713. __func__);
  5714. /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
  5715. write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
  5716. /* lcb_shutdown() with abort=1 does not restore these */
  5717. write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
  5718. dd->dc_shutdown = 0;
  5719. }
  5720. static void dc_start(struct hfi1_devdata *dd)
  5721. {
  5722. mutex_lock(&dd->dc8051_lock);
  5723. _dc_start(dd);
  5724. mutex_unlock(&dd->dc8051_lock);
  5725. }
  5726. /*
  5727. * These LCB adjustments are for the Aurora SerDes core in the FPGA.
  5728. */
  5729. static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
  5730. {
  5731. u64 rx_radr, tx_radr;
  5732. u32 version;
  5733. if (dd->icode != ICODE_FPGA_EMULATION)
  5734. return;
  5735. /*
5736. * These LCB defaults on the _s emulator are good, nothing to do here:
  5737. * LCB_CFG_TX_FIFOS_RADR
  5738. * LCB_CFG_RX_FIFOS_RADR
  5739. * LCB_CFG_LN_DCLK
  5740. * LCB_CFG_IGNORE_LOST_RCLK
  5741. */
  5742. if (is_emulator_s(dd))
  5743. return;
  5744. /* else this is _p */
  5745. version = emulator_rev(dd);
  5746. if (!is_ax(dd))
  5747. version = 0x2d; /* all B0 use 0x2d or higher settings */
  5748. if (version <= 0x12) {
  5749. /* release 0x12 and below */
  5750. /*
  5751. * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
  5752. * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
  5753. * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
  5754. */
  5755. rx_radr =
  5756. 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5757. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5758. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5759. /*
  5760. * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
  5761. * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
  5762. */
  5763. tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5764. } else if (version <= 0x18) {
  5765. /* release 0x13 up to 0x18 */
  5766. /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
  5767. rx_radr =
  5768. 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5769. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5770. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5771. tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5772. } else if (version == 0x19) {
  5773. /* release 0x19 */
  5774. /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
  5775. rx_radr =
  5776. 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5777. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5778. | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5779. tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5780. } else if (version == 0x1a) {
  5781. /* release 0x1a */
  5782. /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
  5783. rx_radr =
  5784. 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5785. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5786. | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5787. tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5788. write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
  5789. } else {
  5790. /* release 0x1b and higher */
  5791. /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
  5792. rx_radr =
  5793. 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
  5794. | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
  5795. | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
  5796. tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
  5797. }
  5798. write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
  5799. /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
  5800. write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
  5801. DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
  5802. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
  5803. }
  5804. /*
  5805. * Handle a SMA idle message
  5806. *
  5807. * This is a work-queue function outside of the interrupt.
  5808. */
  5809. void handle_sma_message(struct work_struct *work)
  5810. {
  5811. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  5812. sma_message_work);
  5813. struct hfi1_devdata *dd = ppd->dd;
  5814. u64 msg;
  5815. int ret;
  5816. /*
  5817. * msg is bytes 1-4 of the 40-bit idle message - the command code
  5818. * is stripped off
  5819. */
  5820. ret = read_idle_sma(dd, &msg);
  5821. if (ret)
  5822. return;
  5823. dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
  5824. /*
  5825. * React to the SMA message. Byte[1] (0 for us) is the command.
  5826. */
  5827. switch (msg & 0xff) {
  5828. case SMA_IDLE_ARM:
  5829. /*
  5830. * See OPAv1 table 9-14 - HFI and External Switch Ports Key
  5831. * State Transitions
  5832. *
  5833. * Only expected in INIT or ARMED, discard otherwise.
  5834. */
  5835. if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
  5836. ppd->neighbor_normal = 1;
  5837. break;
  5838. case SMA_IDLE_ACTIVE:
  5839. /*
  5840. * See OPAv1 table 9-14 - HFI and External Switch Ports Key
  5841. * State Transitions
  5842. *
  5843. * Can activate the node. Discard otherwise.
  5844. */
  5845. if (ppd->host_link_state == HLS_UP_ARMED &&
  5846. ppd->is_active_optimize_enabled) {
  5847. ppd->neighbor_normal = 1;
  5848. ret = set_link_state(ppd, HLS_UP_ACTIVE);
  5849. if (ret)
  5850. dd_dev_err(
  5851. dd,
  5852. "%s: received Active SMA idle message, couldn't set link to Active\n",
  5853. __func__);
  5854. }
  5855. break;
  5856. default:
  5857. dd_dev_err(dd,
  5858. "%s: received unexpected SMA idle message 0x%llx\n",
  5859. __func__, msg);
  5860. break;
  5861. }
  5862. }
  5863. static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
  5864. {
  5865. u64 rcvctrl;
  5866. unsigned long flags;
  5867. spin_lock_irqsave(&dd->rcvctrl_lock, flags);
  5868. rcvctrl = read_csr(dd, RCV_CTRL);
  5869. rcvctrl |= add;
  5870. rcvctrl &= ~clear;
  5871. write_csr(dd, RCV_CTRL, rcvctrl);
  5872. spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
  5873. }
  5874. static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
  5875. {
  5876. adjust_rcvctrl(dd, add, 0);
  5877. }
  5878. static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
  5879. {
  5880. adjust_rcvctrl(dd, 0, clear);
  5881. }
  5882. /*
  5883. * Called from all interrupt handlers to start handling an SPC freeze.
  5884. */
  5885. void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
  5886. {
  5887. struct hfi1_devdata *dd = ppd->dd;
  5888. struct send_context *sc;
  5889. int i;
  5890. int sc_flags;
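/* a FREEZE_SELF caller asks us to trigger the SPC freeze via CCE_CTRL */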
  5891. if (flags & FREEZE_SELF)
  5892. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
  5893. /* enter frozen mode */
  5894. dd->flags |= HFI1_FROZEN;
  5895. /* notify all SDMA engines that they are going into a freeze */
  5896. sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
  5897. sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
  5898. SCF_LINK_DOWN : 0);
  5899. /* do halt pre-handling on all enabled send contexts */
  5900. for (i = 0; i < dd->num_send_contexts; i++) {
  5901. sc = dd->send_contexts[i].sc;
  5902. if (sc && (sc->flags & SCF_ENABLED))
  5903. sc_stop(sc, sc_flags);
  5904. }
5905. /* Send contexts are frozen. Notify user space */
  5906. hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
  5907. if (flags & FREEZE_ABORT) {
  5908. dd_dev_err(dd,
  5909. "Aborted freeze recovery. Please REBOOT system\n");
  5910. return;
  5911. }
  5912. /* queue non-interrupt handler */
  5913. queue_work(ppd->hfi1_wq, &ppd->freeze_work);
  5914. }
  5915. /*
  5916. * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
  5917. * depending on the "freeze" parameter.
  5918. *
  5919. * No need to return an error if it times out, our only option
  5920. * is to proceed anyway.
  5921. */
  5922. static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
  5923. {
  5924. unsigned long timeout;
  5925. u64 reg;
  5926. timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
  5927. while (1) {
  5928. reg = read_csr(dd, CCE_STATUS);
  5929. if (freeze) {
  5930. /* waiting until all indicators are set */
  5931. if ((reg & ALL_FROZE) == ALL_FROZE)
  5932. return; /* all done */
  5933. } else {
  5934. /* waiting until all indicators are clear */
  5935. if ((reg & ALL_FROZE) == 0)
  5936. return; /* all done */
  5937. }
  5938. if (time_after(jiffies, timeout)) {
  5939. dd_dev_err(dd,
  5940. "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
  5941. freeze ? "" : "un", reg & ALL_FROZE,
  5942. freeze ? ALL_FROZE : 0ull);
  5943. return;
  5944. }
  5945. usleep_range(80, 120);
  5946. }
  5947. }
  5948. /*
  5949. * Do all freeze handling for the RXE block.
  5950. */
  5951. static void rxe_freeze(struct hfi1_devdata *dd)
  5952. {
  5953. int i;
  5954. struct hfi1_ctxtdata *rcd;
  5955. /* disable port */
  5956. clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  5957. /* disable all receive contexts */
  5958. for (i = 0; i < dd->num_rcv_contexts; i++) {
  5959. rcd = hfi1_rcd_get_by_index(dd, i);
  5960. hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
  5961. hfi1_rcd_put(rcd);
  5962. }
  5963. }
  5964. /*
  5965. * Unfreeze handling for the RXE block - kernel contexts only.
  5966. * This will also enable the port. User contexts will do unfreeze
  5967. * handling on a per-context basis as they call into the driver.
  5968. *
  5969. */
  5970. static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
  5971. {
  5972. u32 rcvmask;
  5973. u16 i;
  5974. struct hfi1_ctxtdata *rcd;
  5975. /* enable all kernel contexts */
  5976. for (i = 0; i < dd->num_rcv_contexts; i++) {
  5977. rcd = hfi1_rcd_get_by_index(dd, i);
5978. /* Ensure all non-user contexts (including vnic) are enabled */
  5979. if (!rcd ||
  5980. (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
  5981. hfi1_rcd_put(rcd);
  5982. continue;
  5983. }
  5984. rcvmask = HFI1_RCVCTRL_CTXT_ENB;
  5985. /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
  5986. rcvmask |= rcd->rcvhdrtail_kvaddr ?
  5987. HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
  5988. hfi1_rcvctrl(dd, rcvmask, rcd);
  5989. hfi1_rcd_put(rcd);
  5990. }
  5991. /* enable port */
  5992. add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  5993. }
  5994. /*
  5995. * Non-interrupt SPC freeze handling.
  5996. *
  5997. * This is a work-queue function outside of the triggering interrupt.
  5998. */
  5999. void handle_freeze(struct work_struct *work)
  6000. {
  6001. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6002. freeze_work);
  6003. struct hfi1_devdata *dd = ppd->dd;
  6004. /* wait for freeze indicators on all affected blocks */
  6005. wait_for_freeze_status(dd, 1);
  6006. /* SPC is now frozen */
  6007. /* do send PIO freeze steps */
  6008. pio_freeze(dd);
  6009. /* do send DMA freeze steps */
  6010. sdma_freeze(dd);
  6011. /* do send egress freeze steps - nothing to do */
  6012. /* do receive freeze steps */
  6013. rxe_freeze(dd);
  6014. /*
  6015. * Unfreeze the hardware - clear the freeze, wait for each
  6016. * block's frozen bit to clear, then clear the frozen flag.
  6017. */
  6018. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
  6019. wait_for_freeze_status(dd, 0);
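/* on A-step hardware, run one additional freeze/unfreeze cycle */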
  6020. if (is_ax(dd)) {
  6021. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
  6022. wait_for_freeze_status(dd, 1);
  6023. write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
  6024. wait_for_freeze_status(dd, 0);
  6025. }
  6026. /* do send PIO unfreeze steps for kernel contexts */
  6027. pio_kernel_unfreeze(dd);
  6028. /* do send DMA unfreeze steps */
  6029. sdma_unfreeze(dd);
  6030. /* do send egress unfreeze steps - nothing to do */
  6031. /* do receive unfreeze steps for kernel contexts */
  6032. rxe_kernel_unfreeze(dd);
  6033. /*
  6034. * The unfreeze procedure touches global device registers when
  6035. * it disables and re-enables RXE. Mark the device unfrozen
  6036. * after all that is done so other parts of the driver waiting
  6037. * for the device to unfreeze don't do things out of order.
  6038. *
  6039. * The above implies that the meaning of HFI1_FROZEN flag is
  6040. * "Device has gone into freeze mode and freeze mode handling
  6041. * is still in progress."
  6042. *
  6043. * The flag will be removed when freeze mode processing has
  6044. * completed.
  6045. */
  6046. dd->flags &= ~HFI1_FROZEN;
  6047. wake_up(&dd->event_queue);
  6048. /* no longer frozen */
  6049. }
  6050. /**
  6051. * update_xmit_counters - update PortXmitWait/PortVlXmitWait
  6052. * counters.
  6053. * @ppd: info of physical Hfi port
  6054. * @link_width: new link width after link up or downgrade
  6055. *
  6056. * Update the PortXmitWait and PortVlXmitWait counters after
  6057. * a link up or downgrade event to reflect a link width change.
  6058. */
  6059. static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
  6060. {
  6061. int i;
  6062. u16 tx_width;
  6063. u16 link_speed;
  6064. tx_width = tx_link_width(link_width);
  6065. link_speed = get_link_speed(ppd->link_speed_active);
  6066. /*
  6067. * There are C_VL_COUNT number of PortVLXmitWait counters.
  6068. * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
  6069. */
  6070. for (i = 0; i < C_VL_COUNT + 1; i++)
  6071. get_xmit_wait_counters(ppd, tx_width, link_speed, i);
  6072. }
  6073. /*
  6074. * Handle a link up interrupt from the 8051.
  6075. *
  6076. * This is a work-queue function outside of the interrupt.
  6077. */
  6078. void handle_link_up(struct work_struct *work)
  6079. {
  6080. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6081. link_up_work);
  6082. struct hfi1_devdata *dd = ppd->dd;
  6083. set_link_state(ppd, HLS_UP_INIT);
  6084. /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
  6085. read_ltp_rtt(dd);
  6086. /*
  6087. * OPA specifies that certain counters are cleared on a transition
  6088. * to link up, so do that.
  6089. */
  6090. clear_linkup_counters(dd);
  6091. /*
  6092. * And (re)set link up default values.
  6093. */
  6094. set_linkup_defaults(ppd);
  6095. /*
  6096. * Set VL15 credits. Use cached value from verify cap interrupt.
6097. * In case of quick linkup or simulator, the vl15 value will be set by
6098. * handle_linkup_change. The VerifyCap interrupt handler will not be
6099. * called in those scenarios.
  6100. */
  6101. if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
  6102. set_up_vl15(dd, dd->vl15buf_cached);
  6103. /* enforce link speed enabled */
  6104. if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
  6105. /* oops - current speed is not enabled, bounce */
  6106. dd_dev_err(dd,
  6107. "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
  6108. ppd->link_speed_active, ppd->link_speed_enabled);
  6109. set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
  6110. OPA_LINKDOWN_REASON_SPEED_POLICY);
  6111. set_link_state(ppd, HLS_DN_OFFLINE);
  6112. start_link(ppd);
  6113. }
  6114. }
  6115. /*
  6116. * Several pieces of LNI information were cached for SMA in ppd.
  6117. * Reset these on link down
  6118. */
  6119. static void reset_neighbor_info(struct hfi1_pportdata *ppd)
  6120. {
  6121. ppd->neighbor_guid = 0;
  6122. ppd->neighbor_port_number = 0;
  6123. ppd->neighbor_type = 0;
  6124. ppd->neighbor_fm_security = 0;
  6125. }
  6126. static const char * const link_down_reason_strs[] = {
  6127. [OPA_LINKDOWN_REASON_NONE] = "None",
  6128. [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
  6129. [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
  6130. [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
  6131. [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
  6132. [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
  6133. [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
  6134. [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
  6135. [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
  6136. [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
  6137. [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
  6138. [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
  6139. [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
  6140. [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
  6141. [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
  6142. [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
  6143. [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
  6144. [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
  6145. [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
  6146. [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
  6147. [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
  6148. [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
  6149. [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
  6150. [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
  6151. [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
  6152. [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
  6153. [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
  6154. [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
  6155. [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
  6156. [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
  6157. [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
  6158. [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
  6159. [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
  6160. "Excessive buffer overrun",
  6161. [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
  6162. [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
  6163. [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
  6164. [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
  6165. [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
  6166. [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
  6167. [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
  6168. [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
  6169. "Local media not installed",
  6170. [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
  6171. [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
  6172. [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
  6173. "End to end not installed",
  6174. [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
  6175. [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
  6176. [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
  6177. [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
  6178. [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
  6179. [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
  6180. };
  6181. /* return the neighbor link down reason string */
  6182. static const char *link_down_reason_str(u8 reason)
  6183. {
  6184. const char *str = NULL;
  6185. if (reason < ARRAY_SIZE(link_down_reason_strs))
  6186. str = link_down_reason_strs[reason];
  6187. if (!str)
  6188. str = "(invalid)";
  6189. return str;
  6190. }
  6191. /*
  6192. * Handle a link down interrupt from the 8051.
  6193. *
  6194. * This is a work-queue function outside of the interrupt.
  6195. */
  6196. void handle_link_down(struct work_struct *work)
  6197. {
  6198. u8 lcl_reason, neigh_reason = 0;
  6199. u8 link_down_reason;
  6200. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6201. link_down_work);
  6202. int was_up;
  6203. static const char ldr_str[] = "Link down reason: ";
  6204. if ((ppd->host_link_state &
  6205. (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
  6206. ppd->port_type == PORT_TYPE_FIXED)
  6207. ppd->offline_disabled_reason =
  6208. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
  6209. /* Go offline first, then deal with reading/writing through 8051 */
  6210. was_up = !!(ppd->host_link_state & HLS_UP);
  6211. set_link_state(ppd, HLS_DN_OFFLINE);
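/* clear the is_link_down_queued flag now that the link-down work is running */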
  6212. xchg(&ppd->is_link_down_queued, 0);
  6213. if (was_up) {
  6214. lcl_reason = 0;
  6215. /* link down reason is only valid if the link was up */
  6216. read_link_down_reason(ppd->dd, &link_down_reason);
  6217. switch (link_down_reason) {
  6218. case LDR_LINK_TRANSFER_ACTIVE_LOW:
  6219. /* the link went down, no idle message reason */
  6220. dd_dev_info(ppd->dd, "%sUnexpected link down\n",
  6221. ldr_str);
  6222. break;
  6223. case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
  6224. /*
  6225. * The neighbor reason is only valid if an idle message
  6226. * was received for it.
  6227. */
  6228. read_planned_down_reason_code(ppd->dd, &neigh_reason);
  6229. dd_dev_info(ppd->dd,
  6230. "%sNeighbor link down message %d, %s\n",
  6231. ldr_str, neigh_reason,
  6232. link_down_reason_str(neigh_reason));
  6233. break;
  6234. case LDR_RECEIVED_HOST_OFFLINE_REQ:
  6235. dd_dev_info(ppd->dd,
  6236. "%sHost requested link to go offline\n",
  6237. ldr_str);
  6238. break;
  6239. default:
  6240. dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
  6241. ldr_str, link_down_reason);
  6242. break;
  6243. }
  6244. /*
  6245. * If no reason, assume peer-initiated but missed
  6246. * LinkGoingDown idle flits.
  6247. */
  6248. if (neigh_reason == 0)
  6249. lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
  6250. } else {
  6251. /* went down while polling or going up */
  6252. lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
  6253. }
  6254. set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
  6255. /* inform the SMA when the link transitions from up to down */
  6256. if (was_up && ppd->local_link_down_reason.sma == 0 &&
  6257. ppd->neigh_link_down_reason.sma == 0) {
  6258. ppd->local_link_down_reason.sma =
  6259. ppd->local_link_down_reason.latest;
  6260. ppd->neigh_link_down_reason.sma =
  6261. ppd->neigh_link_down_reason.latest;
  6262. }
  6263. reset_neighbor_info(ppd);
  6264. /* disable the port */
  6265. clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  6266. /*
  6267. * If there is no cable attached, turn the DC off. Otherwise,
  6268. * start the link bring up.
  6269. */
  6270. if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
  6271. dc_shutdown(ppd->dd);
  6272. else
  6273. start_link(ppd);
  6274. }
  6275. void handle_link_bounce(struct work_struct *work)
  6276. {
  6277. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6278. link_bounce_work);
  6279. /*
  6280. * Only do something if the link is currently up.
  6281. */
  6282. if (ppd->host_link_state & HLS_UP) {
  6283. set_link_state(ppd, HLS_DN_OFFLINE);
  6284. start_link(ppd);
  6285. } else {
  6286. dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
  6287. __func__, link_state_name(ppd->host_link_state));
  6288. }
  6289. }
  6290. /*
  6291. * Mask conversion: Capability exchange to Port LTP. The capability
  6292. * exchange has an implicit 16b CRC that is mandatory.
  6293. */
  6294. static int cap_to_port_ltp(int cap)
  6295. {
  6296. int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
  6297. if (cap & CAP_CRC_14B)
  6298. port_ltp |= PORT_LTP_CRC_MODE_14;
  6299. if (cap & CAP_CRC_48B)
  6300. port_ltp |= PORT_LTP_CRC_MODE_48;
  6301. if (cap & CAP_CRC_12B_16B_PER_LANE)
  6302. port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
  6303. return port_ltp;
  6304. }
  6305. /*
  6306. * Convert an OPA Port LTP mask to capability mask
  6307. */
  6308. int port_ltp_to_cap(int port_ltp)
  6309. {
  6310. int cap_mask = 0;
  6311. if (port_ltp & PORT_LTP_CRC_MODE_14)
  6312. cap_mask |= CAP_CRC_14B;
  6313. if (port_ltp & PORT_LTP_CRC_MODE_48)
  6314. cap_mask |= CAP_CRC_48B;
  6315. if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
  6316. cap_mask |= CAP_CRC_12B_16B_PER_LANE;
  6317. return cap_mask;
  6318. }
  6319. /*
  6320. * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
  6321. */
  6322. static int lcb_to_port_ltp(int lcb_crc)
  6323. {
  6324. int port_ltp = 0;
  6325. if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
  6326. port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
  6327. else if (lcb_crc == LCB_CRC_48B)
  6328. port_ltp = PORT_LTP_CRC_MODE_48;
  6329. else if (lcb_crc == LCB_CRC_14B)
  6330. port_ltp = PORT_LTP_CRC_MODE_14;
  6331. else
  6332. port_ltp = PORT_LTP_CRC_MODE_16;
  6333. return port_ltp;
  6334. }
  6335. static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
  6336. {
  6337. if (ppd->pkeys[2] != 0) {
  6338. ppd->pkeys[2] = 0;
  6339. (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
  6340. hfi1_event_pkey_change(ppd->dd, ppd->port);
  6341. }
  6342. }
  6343. /*
  6344. * Convert the given link width to the OPA link width bitmask.
  6345. */
  6346. static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
  6347. {
  6348. switch (width) {
  6349. case 0:
  6350. /*
  6351. * Simulator and quick linkup do not set the width.
  6352. * Just set it to 4x without complaint.
  6353. */
  6354. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
  6355. return OPA_LINK_WIDTH_4X;
  6356. return 0; /* no lanes up */
  6357. case 1: return OPA_LINK_WIDTH_1X;
  6358. case 2: return OPA_LINK_WIDTH_2X;
  6359. case 3: return OPA_LINK_WIDTH_3X;
  6360. default:
  6361. dd_dev_info(dd, "%s: invalid width %d, using 4\n",
  6362. __func__, width);
  6363. /* fall through */
  6364. case 4: return OPA_LINK_WIDTH_4X;
  6365. }
  6366. }
  6367. /*
  6368. * Do a population count on the bottom nibble.
  6369. */
  6370. static const u8 bit_counts[16] = {
  6371. 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
  6372. };
  6373. static inline u8 nibble_to_count(u8 nibble)
  6374. {
  6375. return bit_counts[nibble & 0xf];
  6376. }
  6377. /*
  6378. * Read the active lane information from the 8051 registers and return
  6379. * their widths.
  6380. *
  6381. * Active lane information is found in these 8051 registers:
  6382. * enable_lane_tx
  6383. * enable_lane_rx
  6384. */
  6385. static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
  6386. u16 *rx_width)
  6387. {
  6388. u16 tx, rx;
  6389. u8 enable_lane_rx;
  6390. u8 enable_lane_tx;
  6391. u8 tx_polarity_inversion;
  6392. u8 rx_polarity_inversion;
  6393. u8 max_rate;
  6394. /* read the active lanes */
  6395. read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
  6396. &rx_polarity_inversion, &max_rate);
  6397. read_local_lni(dd, &enable_lane_rx);
  6398. /* convert to counts */
  6399. tx = nibble_to_count(enable_lane_tx);
  6400. rx = nibble_to_count(enable_lane_rx);
  6401. /*
  6402. * Set link_speed_active here, overriding what was set in
  6403. * handle_verify_cap(). The ASIC 8051 firmware does not correctly
  6404. * set the max_rate field in handle_verify_cap until v0.19.
  6405. */
  6406. if ((dd->icode == ICODE_RTL_SILICON) &&
  6407. (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
  6408. /* max_rate: 0 = 12.5G, 1 = 25G */
  6409. switch (max_rate) {
  6410. case 0:
  6411. dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
  6412. break;
  6413. default:
  6414. dd_dev_err(dd,
  6415. "%s: unexpected max rate %d, using 25Gb\n",
  6416. __func__, (int)max_rate);
  6417. /* fall through */
  6418. case 1:
  6419. dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
  6420. break;
  6421. }
  6422. }
  6423. dd_dev_info(dd,
  6424. "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
  6425. enable_lane_tx, tx, enable_lane_rx, rx);
  6426. *tx_width = link_width_to_bits(dd, tx);
  6427. *rx_width = link_width_to_bits(dd, rx);
  6428. }
  6429. /*
  6430. * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
  6431. * Valid after the end of VerifyCap and during LinkUp. Does not change
  6432. * after link up. I.e. look elsewhere for downgrade information.
  6433. *
  6434. * Bits are:
  6435. * + bits [7:4] contain the number of active transmitters
  6436. * + bits [3:0] contain the number of active receivers
  6437. * These are numbers 1 through 4 and can be different values if the
  6438. * link is asymmetric.
  6439. *
  6440. * verify_cap_local_fm_link_width[0] retains its original value.
  6441. */
  6442. static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
  6443. u16 *rx_width)
  6444. {
  6445. u16 widths, tx, rx;
  6446. u8 misc_bits, local_flags;
  6447. u16 active_tx, active_rx;
  6448. read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
  6449. tx = widths >> 12;
  6450. rx = (widths >> 8) & 0xf;
  6451. *tx_width = link_width_to_bits(dd, tx);
  6452. *rx_width = link_width_to_bits(dd, rx);
  6453. /* print the active widths */
  6454. get_link_widths(dd, &active_tx, &active_rx);
  6455. }
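/*
 * Sketch of the decode above, under the assumption that the 16-bit frame
 * carries verify_cap_local_fm_link_width[1] in its upper byte (active
 * transmitters in bits [15:12], active receivers in bits [11:8]).
 * Hypothetical example values, not driver code:
 *
 *	u16 widths = 0x4300;            // 4 tx lanes, 3 rx lanes
 *	u16 tx = widths >> 12;          // 4
 *	u16 rx = (widths >> 8) & 0xf;   // 3
 *
 * link_width_to_bits() then maps 4 -> OPA_LINK_WIDTH_4X and
 * 3 -> OPA_LINK_WIDTH_3X.
 */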
  6456. /*
  6457. * Set ppd->link_width_active and ppd->link_width_downgrade_active using
  6458. * hardware information when the link first comes up.
  6459. *
  6460. * The link width is not available until after VerifyCap.AllFramesReceived
  6461. * (the trigger for handle_verify_cap), so this is outside that routine
  6462. * and should be called when the 8051 signals linkup.
  6463. */
  6464. void get_linkup_link_widths(struct hfi1_pportdata *ppd)
  6465. {
  6466. u16 tx_width, rx_width;
  6467. /* get end-of-LNI link widths */
  6468. get_linkup_widths(ppd->dd, &tx_width, &rx_width);
  6469. /* use tx_width as the link is supposed to be symmetric on link up */
  6470. ppd->link_width_active = tx_width;
  6471. /* link width downgrade active (LWD.A) starts out matching LW.A */
  6472. ppd->link_width_downgrade_tx_active = ppd->link_width_active;
  6473. ppd->link_width_downgrade_rx_active = ppd->link_width_active;
  6474. /* per OPA spec, on link up LWD.E resets to LWD.S */
  6475. ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
6476. /* cache the active egress rate (units of 10^6 bits/sec) */
  6477. ppd->current_egress_rate = active_egress_rate(ppd);
  6478. }
  6479. /*
  6480. * Handle a verify capabilities interrupt from the 8051.
  6481. *
  6482. * This is a work-queue function outside of the interrupt.
  6483. */
  6484. void handle_verify_cap(struct work_struct *work)
  6485. {
  6486. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6487. link_vc_work);
  6488. struct hfi1_devdata *dd = ppd->dd;
  6489. u64 reg;
  6490. u8 power_management;
  6491. u8 continuous;
  6492. u8 vcu;
  6493. u8 vau;
  6494. u8 z;
  6495. u16 vl15buf;
  6496. u16 link_widths;
  6497. u16 crc_mask;
  6498. u16 crc_val;
  6499. u16 device_id;
  6500. u16 active_tx, active_rx;
  6501. u8 partner_supported_crc;
  6502. u8 remote_tx_rate;
  6503. u8 device_rev;
  6504. set_link_state(ppd, HLS_VERIFY_CAP);
  6505. lcb_shutdown(dd, 0);
  6506. adjust_lcb_for_fpga_serdes(dd);
  6507. read_vc_remote_phy(dd, &power_management, &continuous);
  6508. read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
  6509. &partner_supported_crc);
  6510. read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
  6511. read_remote_device_id(dd, &device_id, &device_rev);
  6512. /* print the active widths */
  6513. get_link_widths(dd, &active_tx, &active_rx);
  6514. dd_dev_info(dd,
  6515. "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
  6516. (int)power_management, (int)continuous);
  6517. dd_dev_info(dd,
  6518. "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
  6519. (int)vau, (int)z, (int)vcu, (int)vl15buf,
  6520. (int)partner_supported_crc);
  6521. dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
  6522. (u32)remote_tx_rate, (u32)link_widths);
  6523. dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
  6524. (u32)device_id, (u32)device_rev);
  6525. /*
  6526. * The peer vAU value just read is the peer receiver value. HFI does
  6527. * not support a transmit vAU of 0 (AU == 8). We advertised that
  6528. * with Z=1 in the fabric capabilities sent to the peer. The peer
  6529. * will see our Z=1, and, if it advertised a vAU of 0, will move its
  6530. * receive to vAU of 1 (AU == 16). Do the same here. We do not care
  6531. * about the peer Z value - our sent vAU is 3 (hardwired) and is not
  6532. * subject to the Z value exception.
  6533. */
  6534. if (vau == 0)
  6535. vau = 1;
  6536. set_up_vau(dd, vau);
  6537. /*
  6538. * Set VL15 credits to 0 in global credit register. Cache remote VL15
6539. * credits value and wait for link-up interrupt to set it.
  6540. */
  6541. set_up_vl15(dd, 0);
  6542. dd->vl15buf_cached = vl15buf;
  6543. /* set up the LCB CRC mode */
  6544. crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
  6545. /* order is important: use the lowest bit in common */
  6546. if (crc_mask & CAP_CRC_14B)
  6547. crc_val = LCB_CRC_14B;
  6548. else if (crc_mask & CAP_CRC_48B)
  6549. crc_val = LCB_CRC_48B;
  6550. else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
  6551. crc_val = LCB_CRC_12B_16B_PER_LANE;
  6552. else
  6553. crc_val = LCB_CRC_16B;
  6554. dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
  6555. write_csr(dd, DC_LCB_CFG_CRC_MODE,
  6556. (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
  6557. /* set (14b only) or clear sideband credit */
  6558. reg = read_csr(dd, SEND_CM_CTRL);
  6559. if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
  6560. write_csr(dd, SEND_CM_CTRL,
  6561. reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
  6562. } else {
  6563. write_csr(dd, SEND_CM_CTRL,
  6564. reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
  6565. }
  6566. ppd->link_speed_active = 0; /* invalid value */
  6567. if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
  6568. /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
  6569. switch (remote_tx_rate) {
  6570. case 0:
  6571. ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
  6572. break;
  6573. case 1:
  6574. ppd->link_speed_active = OPA_LINK_SPEED_25G;
  6575. break;
  6576. }
  6577. } else {
  6578. /* actual rate is highest bit of the ANDed rates */
  6579. u8 rate = remote_tx_rate & ppd->local_tx_rate;
  6580. if (rate & 2)
  6581. ppd->link_speed_active = OPA_LINK_SPEED_25G;
  6582. else if (rate & 1)
  6583. ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
  6584. }
  6585. if (ppd->link_speed_active == 0) {
  6586. dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
  6587. __func__, (int)remote_tx_rate);
  6588. ppd->link_speed_active = OPA_LINK_SPEED_25G;
  6589. }
  6590. /*
  6591. * Cache the values of the supported, enabled, and active
  6592. * LTP CRC modes to return in 'portinfo' queries. But the bit
  6593. * flags that are returned in the portinfo query differ from
  6594. * what's in the link_crc_mask, crc_sizes, and crc_val
  6595. * variables. Convert these here.
  6596. */
  6597. ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
  6598. /* supported crc modes */
  6599. ppd->port_ltp_crc_mode |=
  6600. cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
  6601. /* enabled crc modes */
  6602. ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
  6603. /* active crc mode */
  6604. /* set up the remote credit return table */
  6605. assign_remote_cm_au_table(dd, vcu);
  6606. /*
  6607. * The LCB is reset on entry to handle_verify_cap(), so this must
  6608. * be applied on every link up.
  6609. *
  6610. * Adjust LCB error kill enable to kill the link if
  6611. * these RBUF errors are seen:
  6612. * REPLAY_BUF_MBE_SMASK
  6613. * FLIT_INPUT_BUF_MBE_SMASK
  6614. */
  6615. if (is_ax(dd)) { /* fixed in B0 */
  6616. reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
  6617. reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
  6618. | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
  6619. write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
  6620. }
  6621. /* pull LCB fifos out of reset - all fifo clocks must be stable */
  6622. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
  6623. /* give 8051 access to the LCB CSRs */
  6624. write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
  6625. set_8051_lcb_access(dd);
  6626. /* tell the 8051 to go to LinkUp */
  6627. set_link_state(ppd, HLS_GOING_UP);
  6628. }
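/*
 * Sketch of the LCB CRC negotiation above (illustrative values only): the
 * common modes are the AND of the locally enabled mask and the peer's
 * supported mask, and the if/else chain prefers 14B, then 48B, then
 * 12B/16B per lane, falling back to 16B.
 *
 *	u16 enabled   = CAP_CRC_14B | CAP_CRC_48B;   // local policy
 *	u16 supported = CAP_CRC_48B;                 // from the peer
 *	u16 mask      = enabled & supported;         // CAP_CRC_48B only
 *	// -> crc_val = LCB_CRC_48B; 14B is not common, so it is skipped
 */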
  6629. /**
  6630. * apply_link_downgrade_policy - Apply the link width downgrade enabled
  6631. * policy against the current active link widths.
  6632. * @ppd: info of physical Hfi port
  6633. * @refresh_widths: True indicates link downgrade event
  6634. * @return: True indicates a successful link downgrade. False indicates
  6635. * link downgrade event failed and the link will bounce back to
  6636. * default link width.
  6637. *
  6638. * Called when the enabled policy changes or the active link widths
  6639. * change.
  6640. * Refresh_widths indicates that a link downgrade occurred. The
  6641. * link_downgraded variable is set by refresh_widths and
  6642. * determines the success/failure of the policy application.
  6643. */
  6644. bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
  6645. bool refresh_widths)
  6646. {
  6647. int do_bounce = 0;
  6648. int tries;
  6649. u16 lwde;
  6650. u16 tx, rx;
  6651. bool link_downgraded = refresh_widths;
  6652. /* use the hls lock to avoid a race with actual link up */
  6653. tries = 0;
  6654. retry:
  6655. mutex_lock(&ppd->hls_lock);
  6656. /* only apply if the link is up */
  6657. if (ppd->host_link_state & HLS_DOWN) {
6658. /* still going up - wait and retry */
  6659. if (ppd->host_link_state & HLS_GOING_UP) {
  6660. if (++tries < 1000) {
  6661. mutex_unlock(&ppd->hls_lock);
  6662. usleep_range(100, 120); /* arbitrary */
  6663. goto retry;
  6664. }
  6665. dd_dev_err(ppd->dd,
  6666. "%s: giving up waiting for link state change\n",
  6667. __func__);
  6668. }
  6669. goto done;
  6670. }
  6671. lwde = ppd->link_width_downgrade_enabled;
  6672. if (refresh_widths) {
  6673. get_link_widths(ppd->dd, &tx, &rx);
  6674. ppd->link_width_downgrade_tx_active = tx;
  6675. ppd->link_width_downgrade_rx_active = rx;
  6676. }
  6677. if (ppd->link_width_downgrade_tx_active == 0 ||
  6678. ppd->link_width_downgrade_rx_active == 0) {
  6679. /* the 8051 reported a dead link as a downgrade */
  6680. dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
  6681. link_downgraded = false;
  6682. } else if (lwde == 0) {
  6683. /* downgrade is disabled */
  6684. /* bounce if not at starting active width */
  6685. if ((ppd->link_width_active !=
  6686. ppd->link_width_downgrade_tx_active) ||
  6687. (ppd->link_width_active !=
  6688. ppd->link_width_downgrade_rx_active)) {
  6689. dd_dev_err(ppd->dd,
  6690. "Link downgrade is disabled and link has downgraded, downing link\n");
  6691. dd_dev_err(ppd->dd,
  6692. " original 0x%x, tx active 0x%x, rx active 0x%x\n",
  6693. ppd->link_width_active,
  6694. ppd->link_width_downgrade_tx_active,
  6695. ppd->link_width_downgrade_rx_active);
  6696. do_bounce = 1;
  6697. link_downgraded = false;
  6698. }
  6699. } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
  6700. (lwde & ppd->link_width_downgrade_rx_active) == 0) {
  6701. /* Tx or Rx is outside the enabled policy */
  6702. dd_dev_err(ppd->dd,
  6703. "Link is outside of downgrade allowed, downing link\n");
  6704. dd_dev_err(ppd->dd,
  6705. " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
  6706. lwde, ppd->link_width_downgrade_tx_active,
  6707. ppd->link_width_downgrade_rx_active);
  6708. do_bounce = 1;
  6709. link_downgraded = false;
  6710. }
  6711. done:
  6712. mutex_unlock(&ppd->hls_lock);
  6713. if (do_bounce) {
  6714. set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
  6715. OPA_LINKDOWN_REASON_WIDTH_POLICY);
  6716. set_link_state(ppd, HLS_DN_OFFLINE);
  6717. start_link(ppd);
  6718. }
  6719. return link_downgraded;
  6720. }
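/*
 * Illustrative walk-through of the policy check above (hypothetical
 * values, not driver code): with a downgrade-enabled mask that allows 3X
 * and 4X, a downgrade to 2X in either direction bounces the link.
 *
 *	u16 lwde      = OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
 *	u16 tx_active = OPA_LINK_WIDTH_2X;  // from get_link_widths()
 *	// (lwde & tx_active) == 0 -> outside the enabled policy -> bounce
 */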
  6721. /*
  6722. * Handle a link downgrade interrupt from the 8051.
  6723. *
  6724. * This is a work-queue function outside of the interrupt.
  6725. */
  6726. void handle_link_downgrade(struct work_struct *work)
  6727. {
  6728. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  6729. link_downgrade_work);
  6730. dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
  6731. if (apply_link_downgrade_policy(ppd, true))
  6732. update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
  6733. }
  6734. static char *dcc_err_string(char *buf, int buf_len, u64 flags)
  6735. {
  6736. return flag_string(buf, buf_len, flags, dcc_err_flags,
  6737. ARRAY_SIZE(dcc_err_flags));
  6738. }
  6739. static char *lcb_err_string(char *buf, int buf_len, u64 flags)
  6740. {
  6741. return flag_string(buf, buf_len, flags, lcb_err_flags,
  6742. ARRAY_SIZE(lcb_err_flags));
  6743. }
  6744. static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
  6745. {
  6746. return flag_string(buf, buf_len, flags, dc8051_err_flags,
  6747. ARRAY_SIZE(dc8051_err_flags));
  6748. }
  6749. static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
  6750. {
  6751. return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
  6752. ARRAY_SIZE(dc8051_info_err_flags));
  6753. }
  6754. static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
  6755. {
  6756. return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
  6757. ARRAY_SIZE(dc8051_info_host_msg_flags));
  6758. }
  6759. static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
  6760. {
  6761. struct hfi1_pportdata *ppd = dd->pport;
  6762. u64 info, err, host_msg;
  6763. int queue_link_down = 0;
  6764. char buf[96];
  6765. /* look at the flags */
  6766. if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
  6767. /* 8051 information set by firmware */
  6768. /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
  6769. info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
  6770. err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
  6771. & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
  6772. host_msg = (info >>
  6773. DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
  6774. & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
  6775. /*
  6776. * Handle error flags.
  6777. */
  6778. if (err & FAILED_LNI) {
  6779. /*
  6780. * LNI error indications are cleared by the 8051
  6781. * only when starting polling. Only pay attention
  6782. * to them when in the states that occur during
  6783. * LNI.
  6784. */
  6785. if (ppd->host_link_state
  6786. & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
  6787. queue_link_down = 1;
  6788. dd_dev_info(dd, "Link error: %s\n",
  6789. dc8051_info_err_string(buf,
  6790. sizeof(buf),
  6791. err &
  6792. FAILED_LNI));
  6793. }
  6794. err &= ~(u64)FAILED_LNI;
  6795. }
6796. /* unknown frames can happen during LNI, just count */
  6797. if (err & UNKNOWN_FRAME) {
  6798. ppd->unknown_frame_count++;
  6799. err &= ~(u64)UNKNOWN_FRAME;
  6800. }
  6801. if (err) {
  6802. /* report remaining errors, but do not do anything */
  6803. dd_dev_err(dd, "8051 info error: %s\n",
  6804. dc8051_info_err_string(buf, sizeof(buf),
  6805. err));
  6806. }
  6807. /*
  6808. * Handle host message flags.
  6809. */
  6810. if (host_msg & HOST_REQ_DONE) {
  6811. /*
  6812. * Presently, the driver does a busy wait for
  6813. * host requests to complete. This is only an
  6814. * informational message.
  6815. * NOTE: The 8051 clears the host message
  6816. * information *on the next 8051 command*.
  6817. * Therefore, when linkup is achieved,
  6818. * this flag will still be set.
  6819. */
  6820. host_msg &= ~(u64)HOST_REQ_DONE;
  6821. }
  6822. if (host_msg & BC_SMA_MSG) {
  6823. queue_work(ppd->link_wq, &ppd->sma_message_work);
  6824. host_msg &= ~(u64)BC_SMA_MSG;
  6825. }
  6826. if (host_msg & LINKUP_ACHIEVED) {
  6827. dd_dev_info(dd, "8051: Link up\n");
  6828. queue_work(ppd->link_wq, &ppd->link_up_work);
  6829. host_msg &= ~(u64)LINKUP_ACHIEVED;
  6830. }
  6831. if (host_msg & EXT_DEVICE_CFG_REQ) {
  6832. handle_8051_request(ppd);
  6833. host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
  6834. }
  6835. if (host_msg & VERIFY_CAP_FRAME) {
  6836. queue_work(ppd->link_wq, &ppd->link_vc_work);
  6837. host_msg &= ~(u64)VERIFY_CAP_FRAME;
  6838. }
  6839. if (host_msg & LINK_GOING_DOWN) {
  6840. const char *extra = "";
  6841. /* no downgrade action needed if going down */
  6842. if (host_msg & LINK_WIDTH_DOWNGRADED) {
  6843. host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
  6844. extra = " (ignoring downgrade)";
  6845. }
  6846. dd_dev_info(dd, "8051: Link down%s\n", extra);
  6847. queue_link_down = 1;
  6848. host_msg &= ~(u64)LINK_GOING_DOWN;
  6849. }
  6850. if (host_msg & LINK_WIDTH_DOWNGRADED) {
  6851. queue_work(ppd->link_wq, &ppd->link_downgrade_work);
  6852. host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
  6853. }
  6854. if (host_msg) {
  6855. /* report remaining messages, but do not do anything */
  6856. dd_dev_info(dd, "8051 info host message: %s\n",
  6857. dc8051_info_host_msg_string(buf,
  6858. sizeof(buf),
  6859. host_msg));
  6860. }
  6861. reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
  6862. }
  6863. if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
  6864. /*
  6865. * Lost the 8051 heartbeat. If this happens, we
  6866. * receive constant interrupts about it. Disable
  6867. * the interrupt after the first.
  6868. */
  6869. dd_dev_err(dd, "Lost 8051 heartbeat\n");
  6870. write_csr(dd, DC_DC8051_ERR_EN,
  6871. read_csr(dd, DC_DC8051_ERR_EN) &
  6872. ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
  6873. reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
  6874. }
  6875. if (reg) {
  6876. /* report the error, but do not do anything */
  6877. dd_dev_err(dd, "8051 error: %s\n",
  6878. dc8051_err_string(buf, sizeof(buf), reg));
  6879. }
  6880. if (queue_link_down) {
  6881. /*
  6882. * if the link is already going down or disabled, do not
  6883. * queue another. If there's a link down entry already
  6884. * queued, don't queue another one.
  6885. */
  6886. if ((ppd->host_link_state &
  6887. (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
  6888. ppd->link_enabled == 0) {
  6889. dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
  6890. __func__, ppd->host_link_state,
  6891. ppd->link_enabled);
  6892. } else {
  6893. if (xchg(&ppd->is_link_down_queued, 1) == 1)
  6894. dd_dev_info(dd,
  6895. "%s: link down request already queued\n",
  6896. __func__);
  6897. else
  6898. queue_work(ppd->link_wq, &ppd->link_down_work);
  6899. }
  6900. }
  6901. }
  6902. static const char * const fm_config_txt[] = {
  6903. [0] =
  6904. "BadHeadDist: Distance violation between two head flits",
  6905. [1] =
  6906. "BadTailDist: Distance violation between two tail flits",
  6907. [2] =
  6908. "BadCtrlDist: Distance violation between two credit control flits",
  6909. [3] =
  6910. "BadCrdAck: Credits return for unsupported VL",
  6911. [4] =
  6912. "UnsupportedVLMarker: Received VL Marker",
  6913. [5] =
  6914. "BadPreempt: Exceeded the preemption nesting level",
  6915. [6] =
  6916. "BadControlFlit: Received unsupported control flit",
  6917. /* no 7 */
  6918. [8] =
  6919. "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
  6920. };
  6921. static const char * const port_rcv_txt[] = {
  6922. [1] =
  6923. "BadPktLen: Illegal PktLen",
  6924. [2] =
  6925. "PktLenTooLong: Packet longer than PktLen",
  6926. [3] =
  6927. "PktLenTooShort: Packet shorter than PktLen",
  6928. [4] =
  6929. "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
  6930. [5] =
  6931. "BadDLID: Illegal DLID (0, doesn't match HFI)",
  6932. [6] =
  6933. "BadL2: Illegal L2 opcode",
  6934. [7] =
  6935. "BadSC: Unsupported SC",
  6936. [9] =
  6937. "BadRC: Illegal RC",
  6938. [11] =
  6939. "PreemptError: Preempting with same VL",
  6940. [12] =
  6941. "PreemptVL15: Preempting a VL15 packet",
  6942. };
  6943. #define OPA_LDR_FMCONFIG_OFFSET 16
  6944. #define OPA_LDR_PORTRCV_OFFSET 0
  6945. static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  6946. {
  6947. u64 info, hdr0, hdr1;
  6948. const char *extra;
  6949. char buf[96];
  6950. struct hfi1_pportdata *ppd = dd->pport;
  6951. u8 lcl_reason = 0;
  6952. int do_bounce = 0;
  6953. if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
  6954. if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
  6955. info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
  6956. dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
  6957. /* set status bit */
  6958. dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
  6959. }
  6960. reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
  6961. }
  6962. if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
  6963. struct hfi1_pportdata *ppd = dd->pport;
  6964. /* this counter saturates at (2^32) - 1 */
  6965. if (ppd->link_downed < (u32)UINT_MAX)
  6966. ppd->link_downed++;
  6967. reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
  6968. }
  6969. if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
  6970. u8 reason_valid = 1;
  6971. info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
  6972. if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
  6973. dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
  6974. /* set status bit */
  6975. dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
  6976. }
  6977. switch (info) {
  6978. case 0:
  6979. case 1:
  6980. case 2:
  6981. case 3:
  6982. case 4:
  6983. case 5:
  6984. case 6:
  6985. extra = fm_config_txt[info];
  6986. break;
  6987. case 8:
  6988. extra = fm_config_txt[info];
  6989. if (ppd->port_error_action &
  6990. OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
  6991. do_bounce = 1;
  6992. /*
  6993. * lcl_reason cannot be derived from info
  6994. * for this error
  6995. */
  6996. lcl_reason =
  6997. OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
  6998. }
  6999. break;
  7000. default:
  7001. reason_valid = 0;
  7002. snprintf(buf, sizeof(buf), "reserved%lld", info);
  7003. extra = buf;
  7004. break;
  7005. }
  7006. if (reason_valid && !do_bounce) {
  7007. do_bounce = ppd->port_error_action &
  7008. (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
  7009. lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
  7010. }
  7011. /* just report this */
  7012. dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
  7013. extra);
  7014. reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
  7015. }
  7016. if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
  7017. u8 reason_valid = 1;
  7018. info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
  7019. hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
  7020. hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
  7021. if (!(dd->err_info_rcvport.status_and_code &
  7022. OPA_EI_STATUS_SMASK)) {
  7023. dd->err_info_rcvport.status_and_code =
  7024. info & OPA_EI_CODE_SMASK;
  7025. /* set status bit */
  7026. dd->err_info_rcvport.status_and_code |=
  7027. OPA_EI_STATUS_SMASK;
  7028. /*
  7029. * save first 2 flits in the packet that caused
  7030. * the error
  7031. */
  7032. dd->err_info_rcvport.packet_flit1 = hdr0;
  7033. dd->err_info_rcvport.packet_flit2 = hdr1;
  7034. }
  7035. switch (info) {
  7036. case 1:
  7037. case 2:
  7038. case 3:
  7039. case 4:
  7040. case 5:
  7041. case 6:
  7042. case 7:
  7043. case 9:
  7044. case 11:
  7045. case 12:
  7046. extra = port_rcv_txt[info];
  7047. break;
  7048. default:
  7049. reason_valid = 0;
  7050. snprintf(buf, sizeof(buf), "reserved%lld", info);
  7051. extra = buf;
  7052. break;
  7053. }
  7054. if (reason_valid && !do_bounce) {
  7055. do_bounce = ppd->port_error_action &
  7056. (1 << (OPA_LDR_PORTRCV_OFFSET + info));
  7057. lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
  7058. }
  7059. /* just report this */
  7060. dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
  7061. " hdr0 0x%llx, hdr1 0x%llx\n",
  7062. extra, hdr0, hdr1);
  7063. reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
  7064. }
  7065. if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
  7066. /* informative only */
  7067. dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
  7068. reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
  7069. }
  7070. if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
  7071. /* informative only */
  7072. dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
  7073. reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
  7074. }
  7075. if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
  7076. reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
  7077. /* report any remaining errors */
  7078. if (reg)
  7079. dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
  7080. dcc_err_string(buf, sizeof(buf), reg));
  7081. if (lcl_reason == 0)
  7082. lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
  7083. if (do_bounce) {
  7084. dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
  7085. __func__);
  7086. set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
  7087. queue_work(ppd->link_wq, &ppd->link_bounce_work);
  7088. }
  7089. }
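/*
 * Sketch of the PortErrorAction test above (illustrative only): fmconfig
 * error codes select bits starting at OPA_LDR_FMCONFIG_OFFSET (16) and
 * PortRcv error codes start at OPA_LDR_PORTRCV_OFFSET (0), so:
 *
 *	// fmconfig error code 2 (BadCtrlDist):
 *	//	bounce if ppd->port_error_action & (1 << (16 + 2))
 *	// PortRcv error code 4 (BadSLID):
 *	//	bounce if ppd->port_error_action & (1 << (0 + 4))
 */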
  7090. static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
  7091. {
  7092. char buf[96];
  7093. dd_dev_info(dd, "LCB Error: %s\n",
  7094. lcb_err_string(buf, sizeof(buf), reg));
  7095. }
  7096. /*
  7097. * CCE block DC interrupt. Source is < 8.
  7098. */
  7099. static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
  7100. {
  7101. const struct err_reg_info *eri = &dc_errs[source];
  7102. if (eri->handler) {
  7103. interrupt_clear_down(dd, 0, eri);
  7104. } else if (source == 3 /* dc_lbm_int */) {
  7105. /*
  7106. * This indicates that a parity error has occurred on the
  7107. * address/control lines presented to the LBM. The error
  7108. * is a single pulse, there is no associated error flag,
  7109. * and it is non-maskable. This is because if a parity
7110. * error occurs on the request, the request is dropped.
  7111. * This should never occur, but it is nice to know if it
  7112. * ever does.
  7113. */
  7114. dd_dev_err(dd, "Parity error in DC LBM block\n");
  7115. } else {
  7116. dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
  7117. }
  7118. }
  7119. /*
  7120. * TX block send credit interrupt. Source is < 160.
  7121. */
  7122. static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
  7123. {
  7124. sc_group_release_update(dd, source);
  7125. }
  7126. /*
  7127. * TX block SDMA interrupt. Source is < 48.
  7128. *
  7129. * SDMA interrupts are grouped by type:
  7130. *
  7131. * 0 - N-1 = SDma
  7132. * N - 2N-1 = SDmaProgress
  7133. * 2N - 3N-1 = SDmaIdle
  7134. */
  7135. static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
  7136. {
  7137. /* what interrupt */
  7138. unsigned int what = source / TXE_NUM_SDMA_ENGINES;
  7139. /* which engine */
  7140. unsigned int which = source % TXE_NUM_SDMA_ENGINES;
  7141. #ifdef CONFIG_SDMA_VERBOSITY
  7142. dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
  7143. slashstrip(__FILE__), __LINE__, __func__);
  7144. sdma_dumpstate(&dd->per_sdma[which]);
  7145. #endif
  7146. if (likely(what < 3 && which < dd->num_sdma)) {
  7147. sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
  7148. } else {
  7149. /* should not happen */
  7150. dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
  7151. }
  7152. }
  7153. /**
  7154. * is_rcv_avail_int() - User receive context available IRQ handler
  7155. * @dd: valid dd
  7156. * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
  7157. *
  7158. * RX block receive available interrupt. Source is < 160.
  7159. *
  7160. * This is the general interrupt handler for user (PSM) receive contexts,
  7161. * and can only be used for non-threaded IRQs.
  7162. */
  7163. static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
  7164. {
  7165. struct hfi1_ctxtdata *rcd;
  7166. char *err_detail;
  7167. if (likely(source < dd->num_rcv_contexts)) {
  7168. rcd = hfi1_rcd_get_by_index(dd, source);
  7169. if (rcd) {
  7170. handle_user_interrupt(rcd);
  7171. hfi1_rcd_put(rcd);
  7172. return; /* OK */
  7173. }
  7174. /* received an interrupt, but no rcd */
  7175. err_detail = "dataless";
  7176. } else {
  7177. /* received an interrupt, but are not using that context */
  7178. err_detail = "out of range";
  7179. }
  7180. dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
  7181. err_detail, source);
  7182. }
  7183. /**
  7184. * is_rcv_urgent_int() - User receive context urgent IRQ handler
  7185. * @dd: valid dd
7186. * @source: logical IRQ source (offset from IS_RCVURGENT_START)
  7187. *
  7188. * RX block receive urgent interrupt. Source is < 160.
  7189. *
  7190. * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
  7191. */
  7192. static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
  7193. {
  7194. struct hfi1_ctxtdata *rcd;
  7195. char *err_detail;
  7196. if (likely(source < dd->num_rcv_contexts)) {
  7197. rcd = hfi1_rcd_get_by_index(dd, source);
  7198. if (rcd) {
  7199. handle_user_interrupt(rcd);
  7200. hfi1_rcd_put(rcd);
  7201. return; /* OK */
  7202. }
  7203. /* received an interrupt, but no rcd */
  7204. err_detail = "dataless";
  7205. } else {
  7206. /* received an interrupt, but are not using that context */
  7207. err_detail = "out of range";
  7208. }
  7209. dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
  7210. err_detail, source);
  7211. }
  7212. /*
  7213. * Reserved range interrupt. Should not be called in normal operation.
  7214. */
  7215. static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
  7216. {
  7217. char name[64];
  7218. dd_dev_err(dd, "unexpected %s interrupt\n",
  7219. is_reserved_name(name, sizeof(name), source));
  7220. }
  7221. static const struct is_table is_table[] = {
  7222. /*
  7223. * start end
  7224. * name func interrupt func
  7225. */
  7226. { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
  7227. is_misc_err_name, is_misc_err_int },
  7228. { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
  7229. is_sdma_eng_err_name, is_sdma_eng_err_int },
  7230. { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
  7231. is_sendctxt_err_name, is_sendctxt_err_int },
  7232. { IS_SDMA_START, IS_SDMA_END,
  7233. is_sdma_eng_name, is_sdma_eng_int },
  7234. { IS_VARIOUS_START, IS_VARIOUS_END,
  7235. is_various_name, is_various_int },
  7236. { IS_DC_START, IS_DC_END,
  7237. is_dc_name, is_dc_int },
  7238. { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
  7239. is_rcv_avail_name, is_rcv_avail_int },
  7240. { IS_RCVURGENT_START, IS_RCVURGENT_END,
  7241. is_rcv_urgent_name, is_rcv_urgent_int },
  7242. { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
  7243. is_send_credit_name, is_send_credit_int},
  7244. { IS_RESERVED_START, IS_RESERVED_END,
  7245. is_reserved_name, is_reserved_int},
  7246. };
  7247. /*
  7248. * Interrupt source interrupt - called when the given source has an interrupt.
  7249. * Source is a bit index into an array of 64-bit integers.
  7250. */
  7251. static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
  7252. {
  7253. const struct is_table *entry;
  7254. /* avoids a double compare by walking the table in-order */
  7255. for (entry = &is_table[0]; entry->is_name; entry++) {
  7256. if (source < entry->end) {
  7257. trace_hfi1_interrupt(dd, entry, source);
  7258. entry->is_int(dd, source - entry->start);
  7259. return;
  7260. }
  7261. }
  7262. /* fell off the end */
  7263. dd_dev_err(dd, "invalid interrupt source %u\n", source);
  7264. }
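/*
 * Sketch of the table walk above (hypothetical ranges, not driver code):
 * each entry covers [start, end) and the table is sorted, so the first
 * entry whose end exceeds the source owns it and its handler is called
 * with the offset into that range.
 *
 *	// e.g. if one entry has start = 16, end = 48 for SDMA engine
 *	// errors, then source 20 matches that entry and the handler is
 *	// invoked with 20 - 16 = 4, i.e. the engine-relative index.
 */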
  7265. /**
7266. * general_interrupt() - General interrupt handler
  7267. * @irq: MSIx IRQ vector
  7268. * @data: hfi1 devdata
  7269. *
  7270. * This is able to correctly handle all non-threaded interrupts. Receive
  7271. * context DATA IRQs are threaded and are not supported by this handler.
  7272. *
  7273. */
  7274. static irqreturn_t general_interrupt(int irq, void *data)
  7275. {
  7276. struct hfi1_devdata *dd = data;
  7277. u64 regs[CCE_NUM_INT_CSRS];
  7278. u32 bit;
  7279. int i;
  7280. irqreturn_t handled = IRQ_NONE;
  7281. this_cpu_inc(*dd->int_counter);
  7282. /* phase 1: scan and clear all handled interrupts */
  7283. for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
  7284. if (dd->gi_mask[i] == 0) {
  7285. regs[i] = 0; /* used later */
  7286. continue;
  7287. }
  7288. regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
  7289. dd->gi_mask[i];
  7290. /* only clear if anything is set */
  7291. if (regs[i])
  7292. write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
  7293. }
  7294. /* phase 2: call the appropriate handler */
  7295. for_each_set_bit(bit, (unsigned long *)&regs[0],
  7296. CCE_NUM_INT_CSRS * 64) {
  7297. is_interrupt(dd, bit);
  7298. handled = IRQ_HANDLED;
  7299. }
  7300. return handled;
  7301. }
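/*
 * Sketch of the two-phase scan above (not driver code): phase 1 snapshots
 * and clears only the bits this handler owns, phase 2 dispatches each set
 * bit. A minimal standalone version of the dispatch over one 64-bit
 * status word, with hypothetical helpers:
 *
 *	u64 status = pending & mask;    // snapshot of owned, pending bits
 *	clear_hw_status(status);        // hypothetical write-1-to-clear
 *	while (status) {
 *		unsigned int bit = __builtin_ctzll(status);
 *
 *		handle_source(bit);     // hypothetical per-source handler
 *		status &= status - 1;   // clear lowest set bit
 *	}
 */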
  7302. static irqreturn_t sdma_interrupt(int irq, void *data)
  7303. {
  7304. struct sdma_engine *sde = data;
  7305. struct hfi1_devdata *dd = sde->dd;
  7306. u64 status;
  7307. #ifdef CONFIG_SDMA_VERBOSITY
  7308. dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
  7309. slashstrip(__FILE__), __LINE__, __func__);
  7310. sdma_dumpstate(sde);
  7311. #endif
  7312. this_cpu_inc(*dd->int_counter);
  7313. /* This read_csr is really bad in the hot path */
  7314. status = read_csr(dd,
  7315. CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
  7316. & sde->imask;
  7317. if (likely(status)) {
  7318. /* clear the interrupt(s) */
  7319. write_csr(dd,
  7320. CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
  7321. status);
  7322. /* handle the interrupt(s) */
  7323. sdma_engine_interrupt(sde, status);
  7324. } else {
  7325. dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
  7326. sde->this_idx);
  7327. }
  7328. return IRQ_HANDLED;
  7329. }
  7330. /*
  7331. * Clear the receive interrupt. Use a read of the interrupt clear CSR
7332. * to ensure that the write completed. This does NOT guarantee that
  7333. * queued DMA writes to memory from the chip are pushed.
  7334. */
  7335. static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
  7336. {
  7337. struct hfi1_devdata *dd = rcd->dd;
  7338. u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
  7339. mmiowb(); /* make sure everything before is written */
  7340. write_csr(dd, addr, rcd->imask);
  7341. /* force the above write on the chip and get a value back */
  7342. (void)read_csr(dd, addr);
  7343. }
  7344. /* force the receive interrupt */
  7345. void force_recv_intr(struct hfi1_ctxtdata *rcd)
  7346. {
  7347. write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
  7348. }
  7349. /*
  7350. * Return non-zero if a packet is present.
  7351. *
  7352. * This routine is called when rechecking for packets after the RcvAvail
  7353. * interrupt has been cleared down. First, do a quick check of memory for
  7354. * a packet present. If not found, use an expensive CSR read of the context
  7355. * tail to determine the actual tail. The CSR read is necessary because there
  7356. * is no method to push pending DMAs to memory other than an interrupt and we
  7357. * are trying to determine if we need to force an interrupt.
  7358. */
  7359. static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
  7360. {
  7361. u32 tail;
  7362. int present;
  7363. if (!rcd->rcvhdrtail_kvaddr)
  7364. present = (rcd->seq_cnt ==
  7365. rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
  7366. else /* is RDMA rtail */
  7367. present = (rcd->head != get_rcvhdrtail(rcd));
  7368. if (present)
  7369. return 1;
7370. /* fall back to a CSR read, correct independent of DMA_RTAIL */
  7371. tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
  7372. return rcd->head != tail;
  7373. }
  7374. /*
  7375. * Receive packet IRQ handler. This routine expects to be on its own IRQ.
  7376. * This routine will try to handle packets immediately (latency), but if
7377. * it finds too many, it will invoke the thread handler (bandwidth). The
  7378. * chip receive interrupt is *not* cleared down until this or the thread (if
  7379. * invoked) is finished. The intent is to avoid extra interrupts while we
  7380. * are processing packets anyway.
  7381. */
  7382. static irqreturn_t receive_context_interrupt(int irq, void *data)
  7383. {
  7384. struct hfi1_ctxtdata *rcd = data;
  7385. struct hfi1_devdata *dd = rcd->dd;
  7386. int disposition;
  7387. int present;
  7388. trace_hfi1_receive_interrupt(dd, rcd);
  7389. this_cpu_inc(*dd->int_counter);
  7390. aspm_ctx_disable(rcd);
  7391. /* receive interrupt remains blocked while processing packets */
  7392. disposition = rcd->do_interrupt(rcd, 0);
  7393. /*
  7394. * Too many packets were seen while processing packets in this
  7395. * IRQ handler. Invoke the handler thread. The receive interrupt
  7396. * remains blocked.
  7397. */
  7398. if (disposition == RCV_PKT_LIMIT)
  7399. return IRQ_WAKE_THREAD;
  7400. /*
  7401. * The packet processor detected no more packets. Clear the receive
7402. * interrupt and recheck for a packet that may have arrived
  7403. * after the previous check and interrupt clear. If a packet arrived,
  7404. * force another interrupt.
  7405. */
  7406. clear_recv_intr(rcd);
  7407. present = check_packet_present(rcd);
  7408. if (present)
  7409. force_recv_intr(rcd);
  7410. return IRQ_HANDLED;
  7411. }
  7412. /*
  7413. * Receive packet thread handler. This expects to be invoked with the
  7414. * receive interrupt still blocked.
  7415. */
  7416. static irqreturn_t receive_context_thread(int irq, void *data)
  7417. {
  7418. struct hfi1_ctxtdata *rcd = data;
  7419. int present;
  7420. /* receive interrupt is still blocked from the IRQ handler */
  7421. (void)rcd->do_interrupt(rcd, 1);
  7422. /*
  7423. * The packet processor will only return if it detected no more
  7424. * packets. Hold IRQs here so we can safely clear the interrupt and
  7425. * recheck for a packet that may have arrived after the previous
  7426. * check and the interrupt clear. If a packet arrived, force another
  7427. * interrupt.
  7428. */
  7429. local_irq_disable();
  7430. clear_recv_intr(rcd);
  7431. present = check_packet_present(rcd);
  7432. if (present)
  7433. force_recv_intr(rcd);
  7434. local_irq_enable();
  7435. return IRQ_HANDLED;
  7436. }
  7437. /* ========================================================================= */
  7438. u32 read_physical_state(struct hfi1_devdata *dd)
  7439. {
  7440. u64 reg;
  7441. reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
  7442. return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
  7443. & DC_DC8051_STS_CUR_STATE_PORT_MASK;
  7444. }
  7445. u32 read_logical_state(struct hfi1_devdata *dd)
  7446. {
  7447. u64 reg;
  7448. reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
  7449. return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
  7450. & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
  7451. }
  7452. static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
  7453. {
  7454. u64 reg;
  7455. reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
  7456. /* clear current state, set new state */
  7457. reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
  7458. reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
  7459. write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
  7460. }
  7461. /*
  7462. * Use the 8051 to read a LCB CSR.
  7463. */
  7464. static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
  7465. {
  7466. u32 regno;
  7467. int ret;
  7468. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
  7469. if (acquire_lcb_access(dd, 0) == 0) {
  7470. *data = read_csr(dd, addr);
  7471. release_lcb_access(dd, 0);
  7472. return 0;
  7473. }
  7474. return -EBUSY;
  7475. }
  7476. /* register is an index of LCB registers: (offset - base) / 8 */
  7477. regno = (addr - DC_LCB_CFG_RUN) >> 3;
  7478. ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
  7479. if (ret != HCMD_SUCCESS)
  7480. return -EBUSY;
  7481. return 0;
  7482. }
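/*
 * Sketch of the index computation above (illustrative only): LCB CSRs are
 * 8 bytes apart, so the 8051 command takes (offset - base) / 8.
 *
 *	// hypothetical offsets: if DC_LCB_CFG_RUN is the base and the
 *	// target CSR sits 0x28 bytes above it, then
 *	//	regno = 0x28 >> 3 = 5
 */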
  7483. /*
  7484. * Provide a cache for some of the LCB registers in case the LCB is
  7485. * unavailable.
  7486. * (The LCB is unavailable in certain link states, for example.)
  7487. */
  7488. struct lcb_datum {
  7489. u32 off;
  7490. u64 val;
  7491. };
  7492. static struct lcb_datum lcb_cache[] = {
  7493. { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
  7494. { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
  7495. { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
  7496. };
  7497. static void update_lcb_cache(struct hfi1_devdata *dd)
  7498. {
  7499. int i;
  7500. int ret;
  7501. u64 val;
  7502. for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
  7503. ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
  7504. /* Update if we get good data */
  7505. if (likely(ret != -EBUSY))
  7506. lcb_cache[i].val = val;
  7507. }
  7508. }
  7509. static int read_lcb_cache(u32 off, u64 *val)
  7510. {
  7511. int i;
  7512. for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
  7513. if (lcb_cache[i].off == off) {
  7514. *val = lcb_cache[i].val;
  7515. return 0;
  7516. }
  7517. }
  7518. pr_warn("%s bad offset 0x%x\n", __func__, off);
  7519. return -1;
  7520. }
  7521. /*
  7522. * Read an LCB CSR. Access may not be in host control, so check.
  7523. * Return 0 on success, -EBUSY on failure.
  7524. */
  7525. int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
  7526. {
  7527. struct hfi1_pportdata *ppd = dd->pport;
  7528. /* if up, go through the 8051 for the value */
  7529. if (ppd->host_link_state & HLS_UP)
  7530. return read_lcb_via_8051(dd, addr, data);
  7531. /* if going up or down, check the cache, otherwise, no access */
  7532. if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
  7533. if (read_lcb_cache(addr, data))
  7534. return -EBUSY;
  7535. return 0;
  7536. }
  7537. /* otherwise, host has access */
  7538. *data = read_csr(dd, addr);
  7539. return 0;
  7540. }
  7541. /*
  7542. * Use the 8051 to write a LCB CSR.
  7543. */
  7544. static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
  7545. {
  7546. u32 regno;
  7547. int ret;
  7548. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
  7549. (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
  7550. if (acquire_lcb_access(dd, 0) == 0) {
  7551. write_csr(dd, addr, data);
  7552. release_lcb_access(dd, 0);
  7553. return 0;
  7554. }
  7555. return -EBUSY;
  7556. }
  7557. /* register is an index of LCB registers: (offset - base) / 8 */
  7558. regno = (addr - DC_LCB_CFG_RUN) >> 3;
  7559. ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
  7560. if (ret != HCMD_SUCCESS)
  7561. return -EBUSY;
  7562. return 0;
  7563. }
  7564. /*
  7565. * Write an LCB CSR. Access may not be in host control, so check.
  7566. * Return 0 on success, -EBUSY on failure.
  7567. */
  7568. int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
  7569. {
  7570. struct hfi1_pportdata *ppd = dd->pport;
  7571. /* if up, go through the 8051 for the value */
  7572. if (ppd->host_link_state & HLS_UP)
  7573. return write_lcb_via_8051(dd, addr, data);
  7574. /* if going up or down, no access */
  7575. if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
  7576. return -EBUSY;
  7577. /* otherwise, host has access */
  7578. write_csr(dd, addr, data);
  7579. return 0;
  7580. }
  7581. /*
  7582. * Returns:
  7583. * < 0 = Linux error, not able to get access
  7584. * > 0 = 8051 command RETURN_CODE
  7585. */
  7586. static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
  7587. u64 *out_data)
  7588. {
  7589. u64 reg, completed;
  7590. int return_code;
  7591. unsigned long timeout;
  7592. hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
  7593. mutex_lock(&dd->dc8051_lock);
  7594. /* We can't send any commands to the 8051 if it's in reset */
  7595. if (dd->dc_shutdown) {
  7596. return_code = -ENODEV;
  7597. goto fail;
  7598. }
  7599. /*
  7600. * If an 8051 host command timed out previously, then the 8051 is
  7601. * stuck.
  7602. *
  7603. * On first timeout, attempt to reset and restart the entire DC
  7604. * block (including 8051). (Is this too big of a hammer?)
  7605. *
  7606. * If the 8051 times out a second time, the reset did not bring it
  7607. * back to healthy life. In that case, fail any subsequent commands.
  7608. */
  7609. if (dd->dc8051_timed_out) {
  7610. if (dd->dc8051_timed_out > 1) {
  7611. dd_dev_err(dd,
  7612. "Previous 8051 host command timed out, skipping command %u\n",
  7613. type);
  7614. return_code = -ENXIO;
  7615. goto fail;
  7616. }
  7617. _dc_shutdown(dd);
  7618. _dc_start(dd);
  7619. }
  7620. /*
  7621. * If there is no timeout, then the 8051 command interface is
  7622. * waiting for a command.
  7623. */
  7624. /*
7625. * When writing an LCB CSR, out_data contains the full value to
7626. * be written, while in_data contains the relative LCB
7627. * address in 7:0. Do the work here, rather than in the caller,
7628. * of distributing the write data to where it needs to go:
  7629. *
  7630. * Write data
  7631. * 39:00 -> in_data[47:8]
  7632. * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
  7633. * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
  7634. */
  7635. if (type == HCMD_WRITE_LCB_CSR) {
  7636. in_data |= ((*out_data) & 0xffffffffffull) << 8;
  7637. /* must preserve COMPLETED - it is tied to hardware */
  7638. reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
  7639. reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
  7640. reg |= ((((*out_data) >> 40) & 0xff) <<
  7641. DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
  7642. | ((((*out_data) >> 48) & 0xffff) <<
  7643. DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
  7644. write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
  7645. }
  7646. /*
  7647. * Do two writes: the first to stabilize the type and req_data, the
  7648. * second to activate.
  7649. */
  7650. reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
  7651. << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
  7652. | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
  7653. << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
  7654. write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
  7655. reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
  7656. write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
  7657. /* wait for completion, alternate: interrupt */
  7658. timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
  7659. while (1) {
  7660. reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
  7661. completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
  7662. if (completed)
  7663. break;
  7664. if (time_after(jiffies, timeout)) {
  7665. dd->dc8051_timed_out++;
  7666. dd_dev_err(dd, "8051 host command %u timeout\n", type);
  7667. if (out_data)
  7668. *out_data = 0;
  7669. return_code = -ETIMEDOUT;
  7670. goto fail;
  7671. }
  7672. udelay(2);
  7673. }
  7674. if (out_data) {
  7675. *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
  7676. & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
  7677. if (type == HCMD_READ_LCB_CSR) {
  7678. /* top 16 bits are in a different register */
  7679. *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
  7680. & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
  7681. << (48
  7682. - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
  7683. }
  7684. }
  7685. return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
  7686. & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
  7687. dd->dc8051_timed_out = 0;
  7688. /*
  7689. * Clear command for next user.
  7690. */
  7691. write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
  7692. fail:
  7693. mutex_unlock(&dd->dc8051_lock);
  7694. return return_code;
  7695. }
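/*
 * Sketch of the HCMD_WRITE_LCB_CSR data distribution above (illustrative
 * values, not driver code): the 64-bit write value is split so bits 39:0
 * ride in the command's req_data and bits 63:40 go out through
 * DC8051_CFG_EXT_DEV_0.
 *
 *	u64 val      = 0x1122334455667788ull;     // value to write
 *	u64 in_data  = lcb_addr | ((val & 0xffffffffffull) << 8);
 *	u8  ret_code = (val >> 40) & 0xff;        // -> RETURN_CODE field
 *	u16 rsp_data = (val >> 48) & 0xffff;      // -> RSP_DATA field
 */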
  7696. static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
  7697. {
  7698. return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
  7699. }
  7700. int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
  7701. u8 lane_id, u32 config_data)
  7702. {
  7703. u64 data;
  7704. int ret;
  7705. data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
  7706. | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
  7707. | (u64)config_data << LOAD_DATA_DATA_SHIFT;
  7708. ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
  7709. if (ret != HCMD_SUCCESS) {
  7710. dd_dev_err(dd,
  7711. "load 8051 config: field id %d, lane %d, err %d\n",
  7712. (int)field_id, (int)lane_id, ret);
  7713. }
  7714. return ret;
  7715. }
  7716. /*
  7717. * Read the 8051 firmware "registers". Use the RAM directly. Always
  7718. * set the result, even on error.
  7719. * Return 0 on success, -errno on failure
  7720. */
  7721. int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
  7722. u32 *result)
  7723. {
  7724. u64 big_data;
  7725. u32 addr;
  7726. int ret;
  7727. /* address start depends on the lane_id */
  7728. if (lane_id < 4)
  7729. addr = (4 * NUM_GENERAL_FIELDS)
  7730. + (lane_id * 4 * NUM_LANE_FIELDS);
  7731. else
  7732. addr = 0;
  7733. addr += field_id * 4;
  7734. /* read is in 8-byte chunks, hardware will truncate the address down */
  7735. ret = read_8051_data(dd, addr, 8, &big_data);
  7736. if (ret == 0) {
  7737. /* extract the 4 bytes we want */
  7738. if (addr & 0x4)
  7739. *result = (u32)(big_data >> 32);
  7740. else
  7741. *result = (u32)big_data;
  7742. } else {
  7743. *result = 0;
  7744. dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
  7745. __func__, lane_id, field_id);
  7746. }
  7747. return ret;
  7748. }
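/*
 * Sketch of the addressing above (hypothetical sizes, not driver code):
 * lane fields follow the general fields, and the 8-byte read is aligned
 * down, so addr bit 2 selects which half of big_data holds the result.
 *
 *	// assume NUM_GENERAL_FIELDS = 16 and NUM_LANE_FIELDS = 8;
 *	// lane 2, field 3:
 *	//	addr = 4 * 16 + 2 * 4 * 8 + 3 * 4 = 140 = 0x8c
 *	// the 8-byte read fetches 0x88..0x8f; addr & 0x4 is set, so the
 *	// wanted word is the upper half: result = big_data >> 32
 */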
  7749. static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
  7750. u8 continuous)
  7751. {
  7752. u32 frame;
  7753. frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
  7754. | power_management << POWER_MANAGEMENT_SHIFT;
  7755. return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
  7756. GENERAL_CONFIG, frame);
  7757. }
  7758. static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
  7759. u16 vl15buf, u8 crc_sizes)
  7760. {
  7761. u32 frame;
  7762. frame = (u32)vau << VAU_SHIFT
  7763. | (u32)z << Z_SHIFT
  7764. | (u32)vcu << VCU_SHIFT
  7765. | (u32)vl15buf << VL15BUF_SHIFT
  7766. | (u32)crc_sizes << CRC_SIZES_SHIFT;
  7767. return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
  7768. GENERAL_CONFIG, frame);
  7769. }
  7770. static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
  7771. u8 *flag_bits, u16 *link_widths)
  7772. {
  7773. u32 frame;
  7774. read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
  7775. &frame);
  7776. *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
  7777. *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
  7778. *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
  7779. }
  7780. static int write_vc_local_link_mode(struct hfi1_devdata *dd,
  7781. u8 misc_bits,
  7782. u8 flag_bits,
  7783. u16 link_widths)
  7784. {
  7785. u32 frame;
  7786. frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
  7787. | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
  7788. | (u32)link_widths << LINK_WIDTH_SHIFT;
  7789. return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
  7790. frame);
  7791. }
  7792. static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
  7793. u8 device_rev)
  7794. {
  7795. u32 frame;
  7796. frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
  7797. | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
  7798. return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
  7799. }
  7800. static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
  7801. u8 *device_rev)
  7802. {
  7803. u32 frame;
  7804. read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
  7805. *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
  7806. *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
  7807. & REMOTE_DEVICE_REV_MASK;
  7808. }
  7809. int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
  7810. {
  7811. u32 frame;
  7812. u32 mask;
  7813. mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
  7814. read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
  7815. /* Clear, then set field */
  7816. frame &= ~mask;
  7817. frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
  7818. return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
  7819. frame);
  7820. }
  7821. void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
  7822. u8 *ver_patch)
  7823. {
  7824. u32 frame;
  7825. read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
  7826. *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
  7827. STS_FM_VERSION_MAJOR_MASK;
  7828. *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
  7829. STS_FM_VERSION_MINOR_MASK;
  7830. read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
  7831. *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
  7832. STS_FM_VERSION_PATCH_MASK;
  7833. }
  7834. static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
  7835. u8 *continuous)
  7836. {
  7837. u32 frame;
  7838. read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
  7839. *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
  7840. & POWER_MANAGEMENT_MASK;
  7841. *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
  7842. & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
  7843. }
  7844. static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
  7845. u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
  7846. {
  7847. u32 frame;
  7848. read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
  7849. *vau = (frame >> VAU_SHIFT) & VAU_MASK;
  7850. *z = (frame >> Z_SHIFT) & Z_MASK;
  7851. *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
  7852. *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
  7853. *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
  7854. }
  7855. static void read_vc_remote_link_width(struct hfi1_devdata *dd,
  7856. u8 *remote_tx_rate,
  7857. u16 *link_widths)
  7858. {
  7859. u32 frame;
  7860. read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
  7861. &frame);
  7862. *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
  7863. & REMOTE_TX_RATE_MASK;
  7864. *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
  7865. }
  7866. static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
  7867. {
  7868. u32 frame;
  7869. read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
  7870. *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
  7871. }
  7872. static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
  7873. {
  7874. read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
  7875. }
  7876. static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
  7877. {
  7878. read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
  7879. }
  7880. void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
  7881. {
  7882. u32 frame;
  7883. int ret;
  7884. *link_quality = 0;
  7885. if (dd->pport->host_link_state & HLS_UP) {
  7886. ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
  7887. &frame);
  7888. if (ret == 0)
  7889. *link_quality = (frame >> LINK_QUALITY_SHIFT)
  7890. & LINK_QUALITY_MASK;
  7891. }
  7892. }
  7893. static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
  7894. {
  7895. u32 frame;
  7896. read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
  7897. *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
  7898. }
  7899. static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
  7900. {
  7901. u32 frame;
  7902. read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
  7903. *ldr = (frame & 0xff);
  7904. }
  7905. static int read_tx_settings(struct hfi1_devdata *dd,
  7906. u8 *enable_lane_tx,
  7907. u8 *tx_polarity_inversion,
  7908. u8 *rx_polarity_inversion,
  7909. u8 *max_rate)
  7910. {
  7911. u32 frame;
  7912. int ret;
  7913. ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
  7914. *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
  7915. & ENABLE_LANE_TX_MASK;
  7916. *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
  7917. & TX_POLARITY_INVERSION_MASK;
  7918. *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
  7919. & RX_POLARITY_INVERSION_MASK;
  7920. *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
  7921. return ret;
  7922. }
  7923. static int write_tx_settings(struct hfi1_devdata *dd,
  7924. u8 enable_lane_tx,
  7925. u8 tx_polarity_inversion,
  7926. u8 rx_polarity_inversion,
  7927. u8 max_rate)
  7928. {
  7929. u32 frame;
  7930. /* no need to mask, all variable sizes match field widths */
  7931. frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
  7932. | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
  7933. | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
  7934. | max_rate << MAX_RATE_SHIFT;
  7935. return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
  7936. }
  7937. /*
  7938. * Read an idle LCB message.
  7939. *
  7940. * Returns 0 on success, -EINVAL on error
  7941. */
  7942. static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
  7943. {
  7944. int ret;
  7945. ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
  7946. if (ret != HCMD_SUCCESS) {
  7947. dd_dev_err(dd, "read idle message: type %d, err %d\n",
  7948. (u32)type, ret);
  7949. return -EINVAL;
  7950. }
  7951. dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
  7952. /* return only the payload as we already know the type */
  7953. *data_out >>= IDLE_PAYLOAD_SHIFT;
  7954. return 0;
  7955. }
  7956. /*
  7957. * Read an idle SMA message. To be done in response to a notification from
  7958. * the 8051.
  7959. *
  7960. * Returns 0 on success, -EINVAL on error
  7961. */
  7962. static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
  7963. {
  7964. return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
  7965. data);
  7966. }
  7967. /*
  7968. * Send an idle LCB message.
  7969. *
  7970. * Returns 0 on success, -EINVAL on error
  7971. */
  7972. static int send_idle_message(struct hfi1_devdata *dd, u64 data)
  7973. {
  7974. int ret;
  7975. dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
  7976. ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
  7977. if (ret != HCMD_SUCCESS) {
  7978. dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
  7979. data, ret);
  7980. return -EINVAL;
  7981. }
  7982. return 0;
  7983. }
  7984. /*
  7985. * Send an idle SMA message.
  7986. *
  7987. * Returns 0 on success, -EINVAL on error
  7988. */
  7989. int send_idle_sma(struct hfi1_devdata *dd, u64 message)
  7990. {
  7991. u64 data;
  7992. data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
  7993. ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
  7994. return send_idle_message(dd, data);
  7995. }
  7996. /*
  7997. * Initialize the LCB then do a quick link up. This may or may not be
  7998. * in loopback.
  7999. *
  8000. * return 0 on success, -errno on error
  8001. */
  8002. static int do_quick_linkup(struct hfi1_devdata *dd)
  8003. {
  8004. int ret;
  8005. lcb_shutdown(dd, 0);
  8006. if (loopback) {
  8007. /* LCB_CFG_LOOPBACK.VAL = 2 */
  8008. /* LCB_CFG_LANE_WIDTH.VAL = 0 */
  8009. write_csr(dd, DC_LCB_CFG_LOOPBACK,
  8010. IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
  8011. write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
  8012. }
  8013. /* start the LCBs */
  8014. /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
  8015. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
  8016. /* simulator only loopback steps */
  8017. if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
  8018. /* LCB_CFG_RUN.EN = 1 */
  8019. write_csr(dd, DC_LCB_CFG_RUN,
  8020. 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
  8021. ret = wait_link_transfer_active(dd, 10);
  8022. if (ret)
  8023. return ret;
  8024. write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
  8025. 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
  8026. }
  8027. if (!loopback) {
  8028. /*
  8029. * When doing quick linkup and not in loopback, both
  8030. * sides must be done with LCB set-up before either
  8031. * starts the quick linkup. Put a delay here so that
  8032. * both sides can be started and have a chance to be
  8033. * done with LCB set up before resuming.
  8034. */
  8035. dd_dev_err(dd,
  8036. "Pausing for peer to be finished with LCB set up\n");
  8037. msleep(5000);
  8038. dd_dev_err(dd, "Continuing with quick linkup\n");
  8039. }
  8040. write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
  8041. set_8051_lcb_access(dd);
  8042. /*
  8043. * State "quick" LinkUp request sets the physical link state to
  8044. * LinkUp without a verify capability sequence.
  8045. * This state is in simulator v37 and later.
  8046. */
  8047. ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
  8048. if (ret != HCMD_SUCCESS) {
  8049. dd_dev_err(dd,
  8050. "%s: set physical link state to quick LinkUp failed with return %d\n",
  8051. __func__, ret);
  8052. set_host_lcb_access(dd);
  8053. write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
  8054. if (ret >= 0)
  8055. ret = -EINVAL;
  8056. return ret;
  8057. }
  8058. return 0; /* success */
  8059. }
  8060. /*
  8061. * Do all special steps to set up loopback.
  8062. */
  8063. static int init_loopback(struct hfi1_devdata *dd)
  8064. {
  8065. dd_dev_info(dd, "Entering loopback mode\n");
  8066. /* all loopbacks should disable self GUID check */
  8067. write_csr(dd, DC_DC8051_CFG_MODE,
  8068. (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
  8069. /*
  8070. * The simulator has only one loopback option - LCB. Switch
  8071. * to that option, which includes quick link up.
  8072. *
  8073. * Accept all valid loopback values.
  8074. */
  8075. if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
  8076. (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
  8077. loopback == LOOPBACK_CABLE)) {
  8078. loopback = LOOPBACK_LCB;
  8079. quick_linkup = 1;
  8080. return 0;
  8081. }
  8082. /*
  8083. * SerDes loopback init sequence is handled in set_local_link_attributes
  8084. */
  8085. if (loopback == LOOPBACK_SERDES)
  8086. return 0;
  8087. /* LCB loopback - handled at poll time */
  8088. if (loopback == LOOPBACK_LCB) {
  8089. quick_linkup = 1; /* LCB is always quick linkup */
  8090. /* not supported in emulation due to emulation RTL changes */
  8091. if (dd->icode == ICODE_FPGA_EMULATION) {
  8092. dd_dev_err(dd,
  8093. "LCB loopback not supported in emulation\n");
  8094. return -EINVAL;
  8095. }
  8096. return 0;
  8097. }
  8098. /* external cable loopback requires no extra steps */
  8099. if (loopback == LOOPBACK_CABLE)
  8100. return 0;
  8101. dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
  8102. return -EINVAL;
  8103. }
  8104. /*
  8105. * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
  8106. * used in the Verify Capability link width attribute.
  8107. */
  8108. static u16 opa_to_vc_link_widths(u16 opa_widths)
  8109. {
  8110. int i;
  8111. u16 result = 0;
	static const struct link_bits {
		u16 from;
		u16 to;
	} opa_link_xlate[] = {
		{ OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
		{ OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
		{ OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
		{ OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
	};
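	/* e.g. OPA 1X|4X enabled -> VC encoding 0x1|0x8 = 0x9 */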
  8121. for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
  8122. if (opa_widths & opa_link_xlate[i].from)
  8123. result |= opa_link_xlate[i].to;
  8124. }
  8125. return result;
  8126. }
  8127. /*
  8128. * Set link attributes before moving to polling.
  8129. */
  8130. static int set_local_link_attributes(struct hfi1_pportdata *ppd)
  8131. {
  8132. struct hfi1_devdata *dd = ppd->dd;
  8133. u8 enable_lane_tx;
  8134. u8 tx_polarity_inversion;
  8135. u8 rx_polarity_inversion;
  8136. int ret;
  8137. u32 misc_bits = 0;
  8138. /* reset our fabric serdes to clear any lingering problems */
  8139. fabric_serdes_reset(dd);
  8140. /* set the local tx rate - need to read-modify-write */
  8141. ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
  8142. &rx_polarity_inversion, &ppd->local_tx_rate);
  8143. if (ret)
  8144. goto set_local_link_attributes_fail;
  8145. if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
  8146. /* set the tx rate to the fastest enabled */
  8147. if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
  8148. ppd->local_tx_rate = 1;
  8149. else
  8150. ppd->local_tx_rate = 0;
  8151. } else {
  8152. /* set the tx rate to all enabled */
  8153. ppd->local_tx_rate = 0;
  8154. if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
  8155. ppd->local_tx_rate |= 2;
  8156. if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
  8157. ppd->local_tx_rate |= 1;
  8158. }
  8159. enable_lane_tx = 0xF; /* enable all four lanes */
  8160. ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
  8161. rx_polarity_inversion, ppd->local_tx_rate);
  8162. if (ret != HCMD_SUCCESS)
  8163. goto set_local_link_attributes_fail;
  8164. ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
  8165. if (ret != HCMD_SUCCESS) {
  8166. dd_dev_err(dd,
  8167. "Failed to set host interface version, return 0x%x\n",
  8168. ret);
  8169. goto set_local_link_attributes_fail;
  8170. }
  8171. /*
  8172. * DC supports continuous updates.
  8173. */
  8174. ret = write_vc_local_phy(dd,
  8175. 0 /* no power management */,
  8176. 1 /* continuous updates */);
  8177. if (ret != HCMD_SUCCESS)
  8178. goto set_local_link_attributes_fail;
  8179. /* z=1 in the next call: AU of 0 is not supported by the hardware */
  8180. ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
  8181. ppd->port_crc_mode_enabled);
  8182. if (ret != HCMD_SUCCESS)
  8183. goto set_local_link_attributes_fail;
  8184. /*
  8185. * SerDes loopback init sequence requires
  8186. * setting bit 0 of MISC_CONFIG_BITS
  8187. */
  8188. if (loopback == LOOPBACK_SERDES)
  8189. misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
	/*
	 * An external device configuration request is used to reset the LCB
	 * so that a retry can be made to obtain operational lanes when the
	 * first attempt is unsuccessful.
	 */
  8195. if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
  8196. misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
  8197. ret = write_vc_local_link_mode(dd, misc_bits, 0,
  8198. opa_to_vc_link_widths(
  8199. ppd->link_width_enabled));
  8200. if (ret != HCMD_SUCCESS)
  8201. goto set_local_link_attributes_fail;
  8202. /* let peer know who we are */
  8203. ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
  8204. if (ret == HCMD_SUCCESS)
  8205. return 0;
  8206. set_local_link_attributes_fail:
  8207. dd_dev_err(dd,
  8208. "Failed to set local link attributes, return 0x%x\n",
  8209. ret);
  8210. return ret;
  8211. }
/*
 * Call this to start the link.
 * Do nothing if the link is disabled.
 * Returns 0 if the link is disabled, the driver is not ready, or the link
 * was successfully moved to polling.
 */
  8217. int start_link(struct hfi1_pportdata *ppd)
  8218. {
  8219. /*
  8220. * Tune the SerDes to a ballpark setting for optimal signal and bit
  8221. * error rate. Needs to be done before starting the link.
  8222. */
  8223. tune_serdes(ppd);
  8224. if (!ppd->driver_link_ready) {
  8225. dd_dev_info(ppd->dd,
  8226. "%s: stopping link start because driver is not ready\n",
  8227. __func__);
  8228. return 0;
  8229. }
  8230. /*
  8231. * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
  8232. * pkey table can be configured properly if the HFI unit is connected
  8233. * to switch port with MgmtAllowed=NO
  8234. */
  8235. clear_full_mgmt_pkey(ppd);
  8236. return set_link_state(ppd, HLS_DN_POLL);
  8237. }
  8238. static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
  8239. {
  8240. struct hfi1_devdata *dd = ppd->dd;
  8241. u64 mask;
  8242. unsigned long timeout;
  8243. /*
  8244. * Some QSFP cables have a quirk that asserts the IntN line as a side
  8245. * effect of power up on plug-in. We ignore this false positive
  8246. * interrupt until the module has finished powering up by waiting for
  8247. * a minimum timeout of the module inrush initialization time of
  8248. * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
  8249. * module have stabilized.
  8250. */
  8251. msleep(500);
  8252. /*
  8253. * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
  8254. */
  8255. timeout = jiffies + msecs_to_jiffies(2000);
  8256. while (1) {
  8257. mask = read_csr(dd, dd->hfi1_id ?
  8258. ASIC_QSFP2_IN : ASIC_QSFP1_IN);
  8259. if (!(mask & QSFP_HFI0_INT_N))
  8260. break;
  8261. if (time_after(jiffies, timeout)) {
  8262. dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
  8263. __func__);
  8264. break;
  8265. }
  8266. udelay(2);
  8267. }
  8268. }
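/*
 * Enable or disable the QSFP IntN interrupt source.  When enabling, clear
 * any pending IntN status first to avoid an immediate spurious interrupt.
 */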
  8269. static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
  8270. {
  8271. struct hfi1_devdata *dd = ppd->dd;
  8272. u64 mask;
  8273. mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
  8274. if (enable) {
  8275. /*
  8276. * Clear the status register to avoid an immediate interrupt
  8277. * when we re-enable the IntN pin
  8278. */
  8279. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
  8280. QSFP_HFI0_INT_N);
  8281. mask |= (u64)QSFP_HFI0_INT_N;
  8282. } else {
  8283. mask &= ~(u64)QSFP_HFI0_INT_N;
  8284. }
  8285. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
  8286. }
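/*
 * Reset the QSFP module: mask IntN, pulse ResetN low, wait for the module
 * to initialize, then re-enable IntN.  The AOC transmitters are left off
 * until QSFP set-up completes.  Returns 0 on success or the set_qsfp_tx()
 * error.
 */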
  8287. int reset_qsfp(struct hfi1_pportdata *ppd)
  8288. {
  8289. struct hfi1_devdata *dd = ppd->dd;
  8290. u64 mask, qsfp_mask;
  8291. /* Disable INT_N from triggering QSFP interrupts */
  8292. set_qsfp_int_n(ppd, 0);
  8293. /* Reset the QSFP */
  8294. mask = (u64)QSFP_HFI0_RESET_N;
  8295. qsfp_mask = read_csr(dd,
  8296. dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
  8297. qsfp_mask &= ~mask;
  8298. write_csr(dd,
  8299. dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
  8300. udelay(10);
  8301. qsfp_mask |= mask;
  8302. write_csr(dd,
  8303. dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
  8304. wait_for_qsfp_init(ppd);
  8305. /*
  8306. * Allow INT_N to trigger the QSFP interrupt to watch
  8307. * for alarms and warnings
  8308. */
  8309. set_qsfp_int_n(ppd, 1);
  8310. /*
  8311. * After the reset, AOC transmitters are enabled by default. They need
  8312. * to be turned off to complete the QSFP setup before they can be
  8313. * enabled again.
  8314. */
  8315. return set_qsfp_tx(ppd, 0);
  8316. }
  8317. static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
  8318. u8 *qsfp_interrupt_status)
  8319. {
  8320. struct hfi1_devdata *dd = ppd->dd;
  8321. if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
  8322. (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
  8323. dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
  8324. __func__);
  8325. if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
  8326. (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
  8327. dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
  8328. __func__);
  8329. /*
  8330. * The remaining alarms/warnings don't matter if the link is down.
  8331. */
  8332. if (ppd->host_link_state & HLS_DOWN)
  8333. return 0;
  8334. if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
  8335. (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
  8336. dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
  8337. __func__);
  8338. if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
  8339. (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
  8340. dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
  8341. __func__);
  8342. /* Byte 2 is vendor specific */
  8343. if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
  8344. (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
  8345. dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
  8346. __func__);
  8347. if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
  8348. (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
  8349. dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
  8350. __func__);
  8351. if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
  8352. (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
  8353. dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
  8354. __func__);
  8355. if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
  8356. (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
  8357. dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
  8358. __func__);
  8359. if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
  8360. (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
  8361. dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
  8362. __func__);
  8363. if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
  8364. (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
  8365. dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
  8366. __func__);
  8367. if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
  8368. (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
  8369. dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
  8370. __func__);
  8371. if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
  8372. (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
  8373. dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
  8374. __func__);
  8375. if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
  8376. (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
  8377. dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
  8378. __func__);
  8379. if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
  8380. (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
  8381. dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
  8382. __func__);
  8383. if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
  8384. (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
  8385. dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
  8386. __func__);
  8387. if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
  8388. (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
  8389. dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
  8390. __func__);
  8391. /* Bytes 9-10 and 11-12 are reserved */
  8392. /* Bytes 13-15 are vendor specific */
  8393. return 0;
  8394. }
  8395. /* This routine will only be scheduled if the QSFP module present is asserted */
  8396. void qsfp_event(struct work_struct *work)
  8397. {
  8398. struct qsfp_data *qd;
  8399. struct hfi1_pportdata *ppd;
  8400. struct hfi1_devdata *dd;
  8401. qd = container_of(work, struct qsfp_data, qsfp_work);
  8402. ppd = qd->ppd;
  8403. dd = ppd->dd;
  8404. /* Sanity check */
  8405. if (!qsfp_mod_present(ppd))
  8406. return;
  8407. if (ppd->host_link_state == HLS_DN_DISABLE) {
  8408. dd_dev_info(ppd->dd,
  8409. "%s: stopping link start because link is disabled\n",
  8410. __func__);
  8411. return;
  8412. }
  8413. /*
  8414. * Turn DC back on after cable has been re-inserted. Up until
  8415. * now, the DC has been in reset to save power.
  8416. */
  8417. dc_start(dd);
  8418. if (qd->cache_refresh_required) {
  8419. set_qsfp_int_n(ppd, 0);
  8420. wait_for_qsfp_init(ppd);
  8421. /*
  8422. * Allow INT_N to trigger the QSFP interrupt to watch
  8423. * for alarms and warnings
  8424. */
  8425. set_qsfp_int_n(ppd, 1);
  8426. start_link(ppd);
  8427. }
  8428. if (qd->check_interrupt_flags) {
  8429. u8 qsfp_interrupt_status[16] = {0,};
  8430. if (one_qsfp_read(ppd, dd->hfi1_id, 6,
  8431. &qsfp_interrupt_status[0], 16) != 16) {
  8432. dd_dev_info(dd,
  8433. "%s: Failed to read status of QSFP module\n",
  8434. __func__);
  8435. } else {
  8436. unsigned long flags;
  8437. handle_qsfp_error_conditions(
  8438. ppd, qsfp_interrupt_status);
  8439. spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
  8440. ppd->qsfp_info.check_interrupt_flags = 0;
  8441. spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
  8442. flags);
  8443. }
  8444. }
  8445. }
  8446. static void init_qsfp_int(struct hfi1_devdata *dd)
  8447. {
  8448. struct hfi1_pportdata *ppd = dd->pport;
  8449. u64 qsfp_mask, cce_int_mask;
  8450. const int qsfp1_int_smask = QSFP1_INT % 64;
  8451. const int qsfp2_int_smask = QSFP2_INT % 64;
	/*
	 * Disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0.
	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR, so just
	 * one of QSFP1_INT/QSFP2_INT is needed to find the index of the
	 * appropriate CSR in the CCEIntMask CSR array.
	 */
  8458. cce_int_mask = read_csr(dd, CCE_INT_MASK +
  8459. (8 * (QSFP1_INT / 64)));
  8460. if (dd->hfi1_id) {
  8461. cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
  8462. write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
  8463. cce_int_mask);
  8464. } else {
  8465. cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
  8466. write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
  8467. cce_int_mask);
  8468. }
  8469. qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
  8470. /* Clear current status to avoid spurious interrupts */
  8471. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
  8472. qsfp_mask);
  8473. write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
  8474. qsfp_mask);
  8475. set_qsfp_int_n(ppd, 0);
  8476. /* Handle active low nature of INT_N and MODPRST_N pins */
  8477. if (qsfp_mod_present(ppd))
  8478. qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
  8479. write_csr(dd,
  8480. dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
  8481. qsfp_mask);
  8482. }
  8483. /*
  8484. * Do a one-time initialize of the LCB block.
  8485. */
  8486. static void init_lcb(struct hfi1_devdata *dd)
  8487. {
  8488. /* simulator does not correctly handle LCB cclk loopback, skip */
  8489. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
  8490. return;
  8491. /* the DC has been reset earlier in the driver load */
  8492. /* set LCB for cclk loopback on the port */
  8493. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
  8494. write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
  8495. write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
  8496. write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
  8497. write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
  8498. write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
  8499. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
  8500. }
  8501. /*
  8502. * Perform a test read on the QSFP. Return 0 on success, -ERRNO
  8503. * on error.
  8504. */
  8505. static int test_qsfp_read(struct hfi1_pportdata *ppd)
  8506. {
  8507. int ret;
  8508. u8 status;
  8509. /*
  8510. * Report success if not a QSFP or, if it is a QSFP, but the cable is
  8511. * not present
  8512. */
  8513. if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
  8514. return 0;
  8515. /* read byte 2, the status byte */
  8516. ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
  8517. if (ret < 0)
  8518. return ret;
  8519. if (ret != 1)
  8520. return -EIO;
  8521. return 0; /* success */
  8522. }
  8523. /*
  8524. * Values for QSFP retry.
  8525. *
  8526. * Give up after 10s (20 x 500ms). The overall timeout was empirically
  8527. * arrived at from experience on a large cluster.
  8528. */
  8529. #define MAX_QSFP_RETRIES 20
  8530. #define QSFP_RETRY_WAIT 500 /* msec */
  8531. /*
  8532. * Try a QSFP read. If it fails, schedule a retry for later.
  8533. * Called on first link activation after driver load.
  8534. */
  8535. static void try_start_link(struct hfi1_pportdata *ppd)
  8536. {
  8537. if (test_qsfp_read(ppd)) {
  8538. /* read failed */
  8539. if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
  8540. dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
  8541. return;
  8542. }
  8543. dd_dev_info(ppd->dd,
  8544. "QSFP not responding, waiting and retrying %d\n",
  8545. (int)ppd->qsfp_retry_count);
  8546. ppd->qsfp_retry_count++;
  8547. queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
  8548. msecs_to_jiffies(QSFP_RETRY_WAIT));
  8549. return;
  8550. }
  8551. ppd->qsfp_retry_count = 0;
  8552. start_link(ppd);
  8553. }
  8554. /*
  8555. * Workqueue function to start the link after a delay.
  8556. */
  8557. void handle_start_link(struct work_struct *work)
  8558. {
  8559. struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
  8560. start_link_work.work);
  8561. try_start_link(ppd);
  8562. }
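/*
 * Bring up the SerDes for the port: set the port GUID if needed, do the
 * one-time LCB init, set up any requested loopback, prepare the QSFP, and
 * make the first link start attempt.
 */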
  8563. int bringup_serdes(struct hfi1_pportdata *ppd)
  8564. {
  8565. struct hfi1_devdata *dd = ppd->dd;
  8566. u64 guid;
  8567. int ret;
  8568. if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
  8569. add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
  8570. guid = ppd->guids[HFI1_PORT_GUID_INDEX];
  8571. if (!guid) {
  8572. if (dd->base_guid)
  8573. guid = dd->base_guid + ppd->port - 1;
  8574. ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
  8575. }
  8576. /* Set linkinit_reason on power up per OPA spec */
  8577. ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
  8578. /* one-time init of the LCB */
  8579. init_lcb(dd);
  8580. if (loopback) {
  8581. ret = init_loopback(dd);
  8582. if (ret < 0)
  8583. return ret;
  8584. }
  8585. get_port_type(ppd);
  8586. if (ppd->port_type == PORT_TYPE_QSFP) {
  8587. set_qsfp_int_n(ppd, 0);
  8588. wait_for_qsfp_init(ppd);
  8589. set_qsfp_int_n(ppd, 1);
  8590. }
  8591. try_start_link(ppd);
  8592. return 0;
  8593. }
  8594. void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
  8595. {
  8596. struct hfi1_devdata *dd = ppd->dd;
	/*
	 * Shut down the link and keep it down.  First clear the flag that
	 * says the driver wants to allow the link to be up
	 * (driver_link_ready).  Then make sure the link is not automatically
	 * restarted (link_enabled).  Cancel any pending restart.  And finally
	 * go offline.
	 */
  8604. ppd->driver_link_ready = 0;
  8605. ppd->link_enabled = 0;
  8606. ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
  8607. flush_delayed_work(&ppd->start_link_work);
  8608. cancel_delayed_work_sync(&ppd->start_link_work);
  8609. ppd->offline_disabled_reason =
  8610. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
  8611. set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
  8612. OPA_LINKDOWN_REASON_REBOOT);
  8613. set_link_state(ppd, HLS_DN_OFFLINE);
  8614. /* disable the port */
  8615. clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  8616. cancel_work_sync(&ppd->freeze_work);
  8617. }
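/* Allocate the per-CPU RC counters for each port; returns -ENOMEM on failure. */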
  8618. static inline int init_cpu_counters(struct hfi1_devdata *dd)
  8619. {
  8620. struct hfi1_pportdata *ppd;
  8621. int i;
  8622. ppd = (struct hfi1_pportdata *)(dd + 1);
  8623. for (i = 0; i < dd->num_pports; i++, ppd++) {
  8624. ppd->ibport_data.rvp.rc_acks = NULL;
  8625. ppd->ibport_data.rvp.rc_qacks = NULL;
  8626. ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
  8627. ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
  8628. ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
  8629. if (!ppd->ibport_data.rvp.rc_acks ||
  8630. !ppd->ibport_data.rvp.rc_delayed_comp ||
  8631. !ppd->ibport_data.rvp.rc_qacks)
  8632. return -ENOMEM;
  8633. }
  8634. return 0;
  8635. }
  8636. /*
  8637. * index is the index into the receive array
  8638. */
  8639. void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
  8640. u32 type, unsigned long pa, u16 order)
  8641. {
  8642. u64 reg;
  8643. if (!(dd->flags & HFI1_PRESENT))
  8644. goto done;
  8645. if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
  8646. pa = 0;
  8647. order = 0;
  8648. } else if (type > PT_INVALID) {
  8649. dd_dev_err(dd,
  8650. "unexpected receive array type %u for index %u, not handled\n",
  8651. type, index);
  8652. goto done;
  8653. }
  8654. trace_hfi1_put_tid(dd, index, type, pa, order);
  8655. #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
  8656. reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
  8657. | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
  8658. | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
  8659. << RCV_ARRAY_RT_ADDR_SHIFT;
  8660. trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
  8661. writeq(reg, dd->rcvarray_wc + (index * 8));
  8662. if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
  8663. /*
  8664. * Eager entries are written and flushed
  8665. *
  8666. * Expected entries are flushed every 4 writes
  8667. */
  8668. flush_wc();
  8669. done:
  8670. return;
  8671. }
  8672. void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
  8673. {
  8674. struct hfi1_devdata *dd = rcd->dd;
  8675. u32 i;
  8676. /* this could be optimized */
  8677. for (i = rcd->eager_base; i < rcd->eager_base +
  8678. rcd->egrbufs.alloced; i++)
  8679. hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
  8680. for (i = rcd->expected_base;
  8681. i < rcd->expected_base + rcd->expected_count; i++)
  8682. hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
  8683. }
  8684. static const char * const ib_cfg_name_strings[] = {
  8685. "HFI1_IB_CFG_LIDLMC",
  8686. "HFI1_IB_CFG_LWID_DG_ENB",
  8687. "HFI1_IB_CFG_LWID_ENB",
  8688. "HFI1_IB_CFG_LWID",
  8689. "HFI1_IB_CFG_SPD_ENB",
  8690. "HFI1_IB_CFG_SPD",
  8691. "HFI1_IB_CFG_RXPOL_ENB",
  8692. "HFI1_IB_CFG_LREV_ENB",
  8693. "HFI1_IB_CFG_LINKLATENCY",
  8694. "HFI1_IB_CFG_HRTBT",
  8695. "HFI1_IB_CFG_OP_VLS",
  8696. "HFI1_IB_CFG_VL_HIGH_CAP",
  8697. "HFI1_IB_CFG_VL_LOW_CAP",
  8698. "HFI1_IB_CFG_OVERRUN_THRESH",
  8699. "HFI1_IB_CFG_PHYERR_THRESH",
  8700. "HFI1_IB_CFG_LINKDEFAULT",
  8701. "HFI1_IB_CFG_PKEYS",
  8702. "HFI1_IB_CFG_MTU",
  8703. "HFI1_IB_CFG_LSTATE",
  8704. "HFI1_IB_CFG_VL_HIGH_LIMIT",
  8705. "HFI1_IB_CFG_PMA_TICKS",
  8706. "HFI1_IB_CFG_PORT"
  8707. };
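/* Map an HFI1_IB_CFG_* value to its name for log messages. */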
  8708. static const char *ib_cfg_name(int which)
  8709. {
  8710. if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
  8711. return "invalid";
  8712. return ib_cfg_name_strings[which];
  8713. }
  8714. int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
  8715. {
  8716. struct hfi1_devdata *dd = ppd->dd;
  8717. int val = 0;
  8718. switch (which) {
  8719. case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
  8720. val = ppd->link_width_enabled;
  8721. break;
  8722. case HFI1_IB_CFG_LWID: /* currently active Link-width */
  8723. val = ppd->link_width_active;
  8724. break;
  8725. case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
  8726. val = ppd->link_speed_enabled;
  8727. break;
  8728. case HFI1_IB_CFG_SPD: /* current Link speed */
  8729. val = ppd->link_speed_active;
  8730. break;
  8731. case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
  8732. case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
  8733. case HFI1_IB_CFG_LINKLATENCY:
  8734. goto unimplemented;
  8735. case HFI1_IB_CFG_OP_VLS:
  8736. val = ppd->actual_vls_operational;
  8737. break;
  8738. case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
  8739. val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
  8740. break;
  8741. case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
  8742. val = VL_ARB_LOW_PRIO_TABLE_SIZE;
  8743. break;
  8744. case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
  8745. val = ppd->overrun_threshold;
  8746. break;
  8747. case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
  8748. val = ppd->phy_error_threshold;
  8749. break;
  8750. case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
  8751. val = HLS_DEFAULT;
  8752. break;
  8753. case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
  8754. case HFI1_IB_CFG_PMA_TICKS:
  8755. default:
  8756. unimplemented:
  8757. if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
  8758. dd_dev_info(
  8759. dd,
  8760. "%s: which %s: not implemented\n",
  8761. __func__,
  8762. ib_cfg_name(which));
  8763. break;
  8764. }
  8765. return val;
  8766. }
  8767. /*
  8768. * The largest MAD packet size.
  8769. */
  8770. #define MAX_MAD_PACKET 2048
/*
 * Return the maximum header bytes that can go on the _wire_ for this
 * device.  This count includes the ICRC, which is not part of the packet
 * held in memory but is appended by the HW.
 * This is dependent on the device's receive header entry size.  HFI allows
 * this to be set per receive context, but the driver presently enforces a
 * global value.
 */
  8780. u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
  8781. {
  8782. /*
  8783. * The maximum non-payload (MTU) bytes in LRH.PktLen are
  8784. * the Receive Header Entry Size minus the PBC (or RHF) size
  8785. * plus one DW for the ICRC appended by HW.
  8786. *
  8787. * dd->rcd[0].rcvhdrqentsize is in DW.
  8788. * We use rcd[0] as all context will have the same value. Also,
  8789. * the first kernel context would have been allocated by now so
  8790. * we are guaranteed a valid value.
  8791. */
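	/* e.g. a 32 DW entry size yields (32 - 2 + 1) * 4 = 124 bytes */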
  8792. return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
  8793. }
/*
 * Set Send Length
 * @ppd: per port data
 *
 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 *
 * This routine changes all VL values except VL15, which it maintains at
 * the same value.
 */
  8805. static void set_send_length(struct hfi1_pportdata *ppd)
  8806. {
  8807. struct hfi1_devdata *dd = ppd->dd;
  8808. u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
  8809. u32 maxvlmtu = dd->vld[15].mtu;
  8810. u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
  8811. & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
  8812. SEND_LEN_CHECK1_LEN_VL15_SHIFT;
  8813. int i, j;
  8814. u32 thres;
  8815. for (i = 0; i < ppd->vls_supported; i++) {
  8816. if (dd->vld[i].mtu > maxvlmtu)
  8817. maxvlmtu = dd->vld[i].mtu;
  8818. if (i <= 3)
  8819. len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
  8820. & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
  8821. ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
  8822. else
  8823. len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
  8824. & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
  8825. ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
  8826. }
  8827. write_csr(dd, SEND_LEN_CHECK0, len1);
  8828. write_csr(dd, SEND_LEN_CHECK1, len2);
  8829. /* adjust kernel credit return thresholds based on new MTUs */
  8830. /* all kernel receive contexts have the same hdrqentsize */
  8831. for (i = 0; i < ppd->vls_supported; i++) {
  8832. thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
  8833. sc_mtu_to_threshold(dd->vld[i].sc,
  8834. dd->vld[i].mtu,
  8835. dd->rcd[0]->rcvhdrqentsize));
  8836. for (j = 0; j < INIT_SC_PER_VL; j++)
  8837. sc_set_cr_threshold(
  8838. pio_select_send_context_vl(dd, j, i),
  8839. thres);
  8840. }
  8841. thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
  8842. sc_mtu_to_threshold(dd->vld[15].sc,
  8843. dd->vld[15].mtu,
  8844. dd->rcd[0]->rcvhdrqentsize));
  8845. sc_set_cr_threshold(dd->vld[15].sc, thres);
  8846. /* Adjust maximum MTU for the port in DC */
  8847. dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
  8848. (ilog2(maxvlmtu >> 8) + 1);
  8849. len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
  8850. len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
  8851. len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
  8852. DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
  8853. write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
  8854. }
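/*
 * Program the LID/LMC check values into the DC port config, every send
 * context's SLID check, and the SDMA engines.
 */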
  8855. static void set_lidlmc(struct hfi1_pportdata *ppd)
  8856. {
  8857. int i;
  8858. u64 sreg = 0;
  8859. struct hfi1_devdata *dd = ppd->dd;
  8860. u32 mask = ~((1U << ppd->lmc) - 1);
  8861. u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
  8862. u32 lid;
  8863. /*
  8864. * Program 0 in CSR if port lid is extended. This prevents
  8865. * 9B packets being sent out for large lids.
  8866. */
  8867. lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
  8868. c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
  8869. | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
  8870. c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
  8871. << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
  8872. ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
  8873. << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
  8874. write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
  8875. /*
  8876. * Iterate over all the send contexts and set their SLID check
  8877. */
  8878. sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
  8879. SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
  8880. (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
  8881. SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
  8882. for (i = 0; i < chip_send_contexts(dd); i++) {
  8883. hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
  8884. i, (u32)sreg);
  8885. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
  8886. }
  8887. /* Now we have to do the same thing for the sdma engines */
  8888. sdma_update_lmc(dd, mask, lid);
  8889. }
  8890. static const char *state_completed_string(u32 completed)
  8891. {
  8892. static const char * const state_completed[] = {
  8893. "EstablishComm",
  8894. "OptimizeEQ",
  8895. "VerifyCap"
  8896. };
  8897. if (completed < ARRAY_SIZE(state_completed))
  8898. return state_completed[completed];
  8899. return "unknown";
  8900. }
  8901. static const char all_lanes_dead_timeout_expired[] =
  8902. "All lanes were inactive – was the interconnect media removed?";
  8903. static const char tx_out_of_policy[] =
  8904. "Passing lanes on local port do not meet the local link width policy";
  8905. static const char no_state_complete[] =
  8906. "State timeout occurred before link partner completed the state";
  8907. static const char * const state_complete_reasons[] = {
  8908. [0x00] = "Reason unknown",
  8909. [0x01] = "Link was halted by driver, refer to LinkDownReason",
  8910. [0x02] = "Link partner reported failure",
  8911. [0x10] = "Unable to achieve frame sync on any lane",
  8912. [0x11] =
  8913. "Unable to find a common bit rate with the link partner",
  8914. [0x12] =
  8915. "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
  8916. [0x13] =
  8917. "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
  8918. [0x14] = no_state_complete,
  8919. [0x15] =
  8920. "State timeout occurred before link partner identified equalization presets",
  8921. [0x16] =
  8922. "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
  8923. [0x17] = tx_out_of_policy,
  8924. [0x20] = all_lanes_dead_timeout_expired,
  8925. [0x21] =
  8926. "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
  8927. [0x22] = no_state_complete,
  8928. [0x23] =
  8929. "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
  8930. [0x24] = tx_out_of_policy,
  8931. [0x30] = all_lanes_dead_timeout_expired,
  8932. [0x31] =
  8933. "State timeout occurred waiting for host to process received frames",
  8934. [0x32] = no_state_complete,
  8935. [0x33] =
  8936. "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
  8937. [0x34] = tx_out_of_policy,
  8938. [0x35] = "Negotiated link width is mutually exclusive",
  8939. [0x36] =
  8940. "Timed out before receiving verifycap frames in VerifyCap.Exchange",
  8941. [0x37] = "Unable to resolve secure data exchange",
  8942. };
  8943. static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
  8944. u32 code)
  8945. {
  8946. const char *str = NULL;
  8947. if (code < ARRAY_SIZE(state_complete_reasons))
  8948. str = state_complete_reasons[code];
  8949. if (str)
  8950. return str;
  8951. return "Reserved";
  8952. }
  8953. /* describe the given last state complete frame */
  8954. static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
  8955. const char *prefix)
  8956. {
  8957. struct hfi1_devdata *dd = ppd->dd;
  8958. u32 success;
  8959. u32 state;
  8960. u32 reason;
  8961. u32 lanes;
  8962. /*
  8963. * Decode frame:
  8964. * [ 0: 0] - success
  8965. * [ 3: 1] - state
  8966. * [ 7: 4] - next state timeout
  8967. * [15: 8] - reason code
  8968. * [31:16] - lanes
  8969. */
  8970. success = frame & 0x1;
  8971. state = (frame >> 1) & 0x7;
  8972. reason = (frame >> 8) & 0xff;
  8973. lanes = (frame >> 16) & 0xffff;
  8974. dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
  8975. prefix, frame);
	dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
		   state_completed_string(state), state);
  8978. dd_dev_err(dd, " state successfully completed: %s\n",
  8979. success ? "yes" : "no");
  8980. dd_dev_err(dd, " fail reason 0x%x: %s\n",
  8981. reason, state_complete_reason_code_string(ppd, reason));
  8982. dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
  8983. }
  8984. /*
  8985. * Read the last state complete frames and explain them. This routine
  8986. * expects to be called if the link went down during link negotiation
  8987. * and initialization (LNI). That is, anywhere between polling and link up.
  8988. */
  8989. static void check_lni_states(struct hfi1_pportdata *ppd)
  8990. {
  8991. u32 last_local_state;
  8992. u32 last_remote_state;
  8993. read_last_local_state(ppd->dd, &last_local_state);
  8994. read_last_remote_state(ppd->dd, &last_remote_state);
  8995. /*
  8996. * Don't report anything if there is nothing to report. A value of
  8997. * 0 means the link was taken down while polling and there was no
  8998. * training in-process.
  8999. */
  9000. if (last_local_state == 0 && last_remote_state == 0)
  9001. return;
  9002. decode_state_complete(ppd, last_local_state, "transmitted");
  9003. decode_state_complete(ppd, last_remote_state, "received");
  9004. }
  9005. /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
  9006. static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
  9007. {
  9008. u64 reg;
  9009. unsigned long timeout;
  9010. /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
  9011. timeout = jiffies + msecs_to_jiffies(wait_ms);
  9012. while (1) {
  9013. reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
  9014. if (reg)
  9015. break;
  9016. if (time_after(jiffies, timeout)) {
  9017. dd_dev_err(dd,
  9018. "timeout waiting for LINK_TRANSFER_ACTIVE\n");
  9019. return -ETIMEDOUT;
  9020. }
  9021. udelay(2);
  9022. }
  9023. return 0;
  9024. }
  9025. /* called when the logical link state is not down as it should be */
  9026. static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
  9027. {
  9028. struct hfi1_devdata *dd = ppd->dd;
  9029. /*
  9030. * Bring link up in LCB loopback
  9031. */
  9032. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
  9033. write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
  9034. DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
  9035. write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
  9036. write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
  9037. write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
  9038. write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
  9039. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
  9040. (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
  9041. udelay(3);
  9042. write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
  9043. write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
  9044. wait_link_transfer_active(dd, 100);
  9045. /*
  9046. * Bring the link down again.
  9047. */
  9048. write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
  9049. write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
  9050. write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
  9051. dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
  9052. }
  9053. /*
  9054. * Helper for set_link_state(). Do not call except from that routine.
  9055. * Expects ppd->hls_mutex to be held.
  9056. *
  9057. * @rem_reason value to be sent to the neighbor
  9058. *
  9059. * LinkDownReasons only set if transition succeeds.
  9060. */
  9061. static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
  9062. {
  9063. struct hfi1_devdata *dd = ppd->dd;
  9064. u32 previous_state;
  9065. int offline_state_ret;
  9066. int ret;
  9067. update_lcb_cache(dd);
  9068. previous_state = ppd->host_link_state;
  9069. ppd->host_link_state = HLS_GOING_OFFLINE;
  9070. /* start offline transition */
  9071. ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
  9072. if (ret != HCMD_SUCCESS) {
  9073. dd_dev_err(dd,
  9074. "Failed to transition to Offline link state, return %d\n",
  9075. ret);
  9076. return -EINVAL;
  9077. }
  9078. if (ppd->offline_disabled_reason ==
  9079. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
  9080. ppd->offline_disabled_reason =
  9081. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
  9082. offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
  9083. if (offline_state_ret < 0)
  9084. return offline_state_ret;
	/* Disable the AOC transmitters */
  9086. if (ppd->port_type == PORT_TYPE_QSFP &&
  9087. ppd->qsfp_info.limiting_active &&
  9088. qsfp_mod_present(ppd)) {
  9089. int ret;
  9090. ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
  9091. if (ret == 0) {
  9092. set_qsfp_tx(ppd, 0);
  9093. release_chip_resource(dd, qsfp_resource(dd));
  9094. } else {
  9095. /* not fatal, but should warn */
  9096. dd_dev_err(dd,
  9097. "Unable to acquire lock to turn off QSFP TX\n");
  9098. }
  9099. }
  9100. /*
  9101. * Wait for the offline.Quiet transition if it hasn't happened yet. It
  9102. * can take a while for the link to go down.
  9103. */
  9104. if (offline_state_ret != PLS_OFFLINE_QUIET) {
  9105. ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
  9106. if (ret < 0)
  9107. return ret;
  9108. }
  9109. /*
  9110. * Now in charge of LCB - must be after the physical state is
  9111. * offline.quiet and before host_link_state is changed.
  9112. */
  9113. set_host_lcb_access(dd);
  9114. write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
  9115. /* make sure the logical state is also down */
  9116. ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
  9117. if (ret)
  9118. force_logical_link_state_down(ppd);
  9119. ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
  9120. update_statusp(ppd, IB_PORT_DOWN);
  9121. /*
  9122. * The LNI has a mandatory wait time after the physical state
  9123. * moves to Offline.Quiet. The wait time may be different
  9124. * depending on how the link went down. The 8051 firmware
  9125. * will observe the needed wait time and only move to ready
  9126. * when that is completed. The largest of the quiet timeouts
  9127. * is 6s, so wait that long and then at least 0.5s more for
  9128. * other transitions, and another 0.5s for a buffer.
  9129. */
  9130. ret = wait_fm_ready(dd, 7000);
  9131. if (ret) {
  9132. dd_dev_err(dd,
  9133. "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
  9134. /* state is really offline, so make it so */
  9135. ppd->host_link_state = HLS_DN_OFFLINE;
  9136. return ret;
  9137. }
  9138. /*
  9139. * The state is now offline and the 8051 is ready to accept host
  9140. * requests.
  9141. * - change our state
  9142. * - notify others if we were previously in a linkup state
  9143. */
  9144. ppd->host_link_state = HLS_DN_OFFLINE;
  9145. if (previous_state & HLS_UP) {
  9146. /* went down while link was up */
  9147. handle_linkup_change(dd, 0);
  9148. } else if (previous_state
  9149. & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
  9150. /* went down while attempting link up */
  9151. check_lni_states(ppd);
  9152. /* The QSFP doesn't need to be reset on LNI failure */
  9153. ppd->qsfp_info.reset_needed = 0;
  9154. }
  9155. /* the active link width (downgrade) is 0 on link down */
  9156. ppd->link_width_active = 0;
  9157. ppd->link_width_downgrade_tx_active = 0;
  9158. ppd->link_width_downgrade_rx_active = 0;
  9159. ppd->current_egress_rate = 0;
  9160. return 0;
  9161. }
  9162. /* return the link state name */
  9163. static const char *link_state_name(u32 state)
  9164. {
  9165. const char *name;
  9166. int n = ilog2(state);
  9167. static const char * const names[] = {
  9168. [__HLS_UP_INIT_BP] = "INIT",
  9169. [__HLS_UP_ARMED_BP] = "ARMED",
  9170. [__HLS_UP_ACTIVE_BP] = "ACTIVE",
  9171. [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
  9172. [__HLS_DN_POLL_BP] = "POLL",
  9173. [__HLS_DN_DISABLE_BP] = "DISABLE",
  9174. [__HLS_DN_OFFLINE_BP] = "OFFLINE",
  9175. [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
  9176. [__HLS_GOING_UP_BP] = "GOING_UP",
  9177. [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
  9178. [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
  9179. };
  9180. name = n < ARRAY_SIZE(names) ? names[n] : NULL;
  9181. return name ? name : "unknown";
  9182. }
  9183. /* return the link state reason name */
  9184. static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
  9185. {
  9186. if (state == HLS_UP_INIT) {
  9187. switch (ppd->linkinit_reason) {
  9188. case OPA_LINKINIT_REASON_LINKUP:
  9189. return "(LINKUP)";
  9190. case OPA_LINKINIT_REASON_FLAPPING:
  9191. return "(FLAPPING)";
  9192. case OPA_LINKINIT_OUTSIDE_POLICY:
  9193. return "(OUTSIDE_POLICY)";
  9194. case OPA_LINKINIT_QUARANTINED:
  9195. return "(QUARANTINED)";
  9196. case OPA_LINKINIT_INSUFIC_CAPABILITY:
  9197. return "(INSUFIC_CAPABILITY)";
  9198. default:
  9199. break;
  9200. }
  9201. }
  9202. return "";
  9203. }
  9204. /*
  9205. * driver_pstate - convert the driver's notion of a port's
  9206. * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
  9207. * Return -1 (converted to a u32) to indicate error.
  9208. */
  9209. u32 driver_pstate(struct hfi1_pportdata *ppd)
  9210. {
  9211. switch (ppd->host_link_state) {
  9212. case HLS_UP_INIT:
  9213. case HLS_UP_ARMED:
  9214. case HLS_UP_ACTIVE:
  9215. return IB_PORTPHYSSTATE_LINKUP;
  9216. case HLS_DN_POLL:
  9217. return IB_PORTPHYSSTATE_POLLING;
  9218. case HLS_DN_DISABLE:
  9219. return IB_PORTPHYSSTATE_DISABLED;
  9220. case HLS_DN_OFFLINE:
  9221. return OPA_PORTPHYSSTATE_OFFLINE;
  9222. case HLS_VERIFY_CAP:
  9223. return IB_PORTPHYSSTATE_TRAINING;
  9224. case HLS_GOING_UP:
  9225. return IB_PORTPHYSSTATE_TRAINING;
  9226. case HLS_GOING_OFFLINE:
  9227. return OPA_PORTPHYSSTATE_OFFLINE;
  9228. case HLS_LINK_COOLDOWN:
  9229. return OPA_PORTPHYSSTATE_OFFLINE;
  9230. case HLS_DN_DOWNDEF:
  9231. default:
  9232. dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
  9233. ppd->host_link_state);
  9234. return -1;
  9235. }
  9236. }
  9237. /*
  9238. * driver_lstate - convert the driver's notion of a port's
  9239. * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
  9240. * (converted to a u32) to indicate error.
  9241. */
  9242. u32 driver_lstate(struct hfi1_pportdata *ppd)
  9243. {
  9244. if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
  9245. return IB_PORT_DOWN;
  9246. switch (ppd->host_link_state & HLS_UP) {
  9247. case HLS_UP_INIT:
  9248. return IB_PORT_INIT;
  9249. case HLS_UP_ARMED:
  9250. return IB_PORT_ARMED;
  9251. case HLS_UP_ACTIVE:
  9252. return IB_PORT_ACTIVE;
  9253. default:
  9254. dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
  9255. ppd->host_link_state);
  9256. return -1;
  9257. }
  9258. }
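/*
 * Record the local, neighbor, and remote link down reasons, but only if
 * no reason has been captured yet for this link down.
 */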
  9259. void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
  9260. u8 neigh_reason, u8 rem_reason)
  9261. {
  9262. if (ppd->local_link_down_reason.latest == 0 &&
  9263. ppd->neigh_link_down_reason.latest == 0) {
  9264. ppd->local_link_down_reason.latest = lcl_reason;
  9265. ppd->neigh_link_down_reason.latest = neigh_reason;
  9266. ppd->remote_link_down_reason = rem_reason;
  9267. }
  9268. }
/**
 * data_vls_operational() - Verify that data VL BCT credits and MTU
 * are both set.
 * @ppd: pointer to hfi1_pportdata structure
 *
 * Return: true - OK, false - otherwise.
 */
  9276. static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
  9277. {
  9278. int i;
  9279. u64 reg;
  9280. if (!ppd->actual_vls_operational)
  9281. return false;
  9282. for (i = 0; i < ppd->vls_supported; i++) {
  9283. reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
  9284. if ((reg && !ppd->dd->vld[i].mtu) ||
  9285. (!reg && ppd->dd->vld[i].mtu))
  9286. return false;
  9287. }
  9288. return true;
  9289. }
  9290. /*
  9291. * Change the physical and/or logical link state.
  9292. *
  9293. * Do not call this routine while inside an interrupt. It contains
  9294. * calls to routines that can take multiple seconds to finish.
  9295. *
  9296. * Returns 0 on success, -errno on failure.
  9297. */
  9298. int set_link_state(struct hfi1_pportdata *ppd, u32 state)
  9299. {
  9300. struct hfi1_devdata *dd = ppd->dd;
  9301. struct ib_event event = {.device = NULL};
  9302. int ret1, ret = 0;
  9303. int orig_new_state, poll_bounce;
  9304. mutex_lock(&ppd->hls_lock);
  9305. orig_new_state = state;
  9306. if (state == HLS_DN_DOWNDEF)
  9307. state = HLS_DEFAULT;
  9308. /* interpret poll -> poll as a link bounce */
  9309. poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
  9310. state == HLS_DN_POLL;
  9311. dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
  9312. link_state_name(ppd->host_link_state),
  9313. link_state_name(orig_new_state),
  9314. poll_bounce ? "(bounce) " : "",
  9315. link_state_reason_name(ppd, state));
  9316. /*
  9317. * If we're going to a (HLS_*) link state that implies the logical
  9318. * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
  9319. * reset is_sm_config_started to 0.
  9320. */
  9321. if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
  9322. ppd->is_sm_config_started = 0;
  9323. /*
  9324. * Do nothing if the states match. Let a poll to poll link bounce
  9325. * go through.
  9326. */
  9327. if (ppd->host_link_state == state && !poll_bounce)
  9328. goto done;
  9329. switch (state) {
  9330. case HLS_UP_INIT:
  9331. if (ppd->host_link_state == HLS_DN_POLL &&
  9332. (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
  9333. /*
  9334. * Quick link up jumps from polling to here.
  9335. *
  9336. * Whether in normal or loopback mode, the
  9337. * simulator jumps from polling to link up.
  9338. * Accept that here.
  9339. */
  9340. /* OK */
  9341. } else if (ppd->host_link_state != HLS_GOING_UP) {
  9342. goto unexpected;
  9343. }
		/*
		 * Wait for the Link_Up physical state.
		 * The physical and logical states should already have
		 * transitioned to LinkUp and LinkInit respectively.
		 */
  9349. ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
  9350. if (ret) {
  9351. dd_dev_err(dd,
  9352. "%s: physical state did not change to LINK-UP\n",
  9353. __func__);
  9354. break;
  9355. }
  9356. ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
  9357. if (ret) {
  9358. dd_dev_err(dd,
  9359. "%s: logical state did not change to INIT\n",
  9360. __func__);
  9361. break;
  9362. }
  9363. /* clear old transient LINKINIT_REASON code */
  9364. if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
  9365. ppd->linkinit_reason =
  9366. OPA_LINKINIT_REASON_LINKUP;
  9367. /* enable the port */
  9368. add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
  9369. handle_linkup_change(dd, 1);
  9370. pio_kernel_linkup(dd);
		/*
		 * After link up, a new link width will have been set.
		 * Update the xmit counters for the new link width.
		 */
  9376. update_xmit_counters(ppd, ppd->link_width_active);
  9377. ppd->host_link_state = HLS_UP_INIT;
  9378. update_statusp(ppd, IB_PORT_INIT);
  9379. break;
  9380. case HLS_UP_ARMED:
  9381. if (ppd->host_link_state != HLS_UP_INIT)
  9382. goto unexpected;
  9383. if (!data_vls_operational(ppd)) {
  9384. dd_dev_err(dd,
  9385. "%s: Invalid data VL credits or mtu\n",
  9386. __func__);
  9387. ret = -EINVAL;
  9388. break;
  9389. }
  9390. set_logical_state(dd, LSTATE_ARMED);
  9391. ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
  9392. if (ret) {
  9393. dd_dev_err(dd,
  9394. "%s: logical state did not change to ARMED\n",
  9395. __func__);
  9396. break;
  9397. }
  9398. ppd->host_link_state = HLS_UP_ARMED;
  9399. update_statusp(ppd, IB_PORT_ARMED);
  9400. /*
  9401. * The simulator does not currently implement SMA messages,
  9402. * so neighbor_normal is not set. Set it here when we first
  9403. * move to Armed.
  9404. */
  9405. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
  9406. ppd->neighbor_normal = 1;
  9407. break;
  9408. case HLS_UP_ACTIVE:
  9409. if (ppd->host_link_state != HLS_UP_ARMED)
  9410. goto unexpected;
  9411. set_logical_state(dd, LSTATE_ACTIVE);
  9412. ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
  9413. if (ret) {
  9414. dd_dev_err(dd,
  9415. "%s: logical state did not change to ACTIVE\n",
  9416. __func__);
  9417. } else {
  9418. /* tell all engines to go running */
  9419. sdma_all_running(dd);
  9420. ppd->host_link_state = HLS_UP_ACTIVE;
  9421. update_statusp(ppd, IB_PORT_ACTIVE);
			/* Signal the IB layer that the port has gone active */
  9423. event.device = &dd->verbs_dev.rdi.ibdev;
  9424. event.element.port_num = ppd->port;
  9425. event.event = IB_EVENT_PORT_ACTIVE;
  9426. }
  9427. break;
  9428. case HLS_DN_POLL:
  9429. if ((ppd->host_link_state == HLS_DN_DISABLE ||
  9430. ppd->host_link_state == HLS_DN_OFFLINE) &&
  9431. dd->dc_shutdown)
  9432. dc_start(dd);
  9433. /* Hand LED control to the DC */
  9434. write_csr(dd, DCC_CFG_LED_CNTRL, 0);
  9435. if (ppd->host_link_state != HLS_DN_OFFLINE) {
  9436. u8 tmp = ppd->link_enabled;
  9437. ret = goto_offline(ppd, ppd->remote_link_down_reason);
  9438. if (ret) {
  9439. ppd->link_enabled = tmp;
  9440. break;
  9441. }
  9442. ppd->remote_link_down_reason = 0;
  9443. if (ppd->driver_link_ready)
  9444. ppd->link_enabled = 1;
  9445. }
  9446. set_all_slowpath(ppd->dd);
  9447. ret = set_local_link_attributes(ppd);
  9448. if (ret)
  9449. break;
  9450. ppd->port_error_action = 0;
  9451. if (quick_linkup) {
  9452. /* quick linkup does not go into polling */
  9453. ret = do_quick_linkup(dd);
  9454. } else {
  9455. ret1 = set_physical_link_state(dd, PLS_POLLING);
  9456. if (!ret1)
  9457. ret1 = wait_phys_link_out_of_offline(ppd,
  9458. 3000);
  9459. if (ret1 != HCMD_SUCCESS) {
  9460. dd_dev_err(dd,
  9461. "Failed to transition to Polling link state, return 0x%x\n",
  9462. ret1);
  9463. ret = -EINVAL;
  9464. }
  9465. }
  9466. /*
  9467. * Change the host link state after requesting DC8051 to
  9468. * change its physical state so that we can ignore any
  9469. * interrupt with stale LNI(XX) error, which will not be
  9470. * cleared until DC8051 transitions to Polling state.
  9471. */
  9472. ppd->host_link_state = HLS_DN_POLL;
  9473. ppd->offline_disabled_reason =
  9474. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
  9475. /*
  9476. * If an error occurred above, go back to offline. The
  9477. * caller may reschedule another attempt.
  9478. */
  9479. if (ret)
  9480. goto_offline(ppd, 0);
  9481. else
  9482. log_physical_state(ppd, PLS_POLLING);
  9483. break;
  9484. case HLS_DN_DISABLE:
  9485. /* link is disabled */
  9486. ppd->link_enabled = 0;
  9487. /* allow any state to transition to disabled */
  9488. /* must transition to offline first */
  9489. if (ppd->host_link_state != HLS_DN_OFFLINE) {
  9490. ret = goto_offline(ppd, ppd->remote_link_down_reason);
  9491. if (ret)
  9492. break;
  9493. ppd->remote_link_down_reason = 0;
  9494. }
  9495. if (!dd->dc_shutdown) {
  9496. ret1 = set_physical_link_state(dd, PLS_DISABLED);
  9497. if (ret1 != HCMD_SUCCESS) {
  9498. dd_dev_err(dd,
  9499. "Failed to transition to Disabled link state, return 0x%x\n",
  9500. ret1);
  9501. ret = -EINVAL;
  9502. break;
  9503. }
  9504. ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
  9505. if (ret) {
  9506. dd_dev_err(dd,
  9507. "%s: physical state did not change to DISABLED\n",
  9508. __func__);
  9509. break;
  9510. }
  9511. dc_shutdown(dd);
  9512. }
  9513. ppd->host_link_state = HLS_DN_DISABLE;
  9514. break;
  9515. case HLS_DN_OFFLINE:
  9516. if (ppd->host_link_state == HLS_DN_DISABLE)
  9517. dc_start(dd);
  9518. /* allow any state to transition to offline */
  9519. ret = goto_offline(ppd, ppd->remote_link_down_reason);
  9520. if (!ret)
  9521. ppd->remote_link_down_reason = 0;
  9522. break;
  9523. case HLS_VERIFY_CAP:
  9524. if (ppd->host_link_state != HLS_DN_POLL)
  9525. goto unexpected;
  9526. ppd->host_link_state = HLS_VERIFY_CAP;
  9527. log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
  9528. break;
  9529. case HLS_GOING_UP:
  9530. if (ppd->host_link_state != HLS_VERIFY_CAP)
  9531. goto unexpected;
  9532. ret1 = set_physical_link_state(dd, PLS_LINKUP);
  9533. if (ret1 != HCMD_SUCCESS) {
  9534. dd_dev_err(dd,
  9535. "Failed to transition to link up state, return 0x%x\n",
  9536. ret1);
  9537. ret = -EINVAL;
  9538. break;
  9539. }
  9540. ppd->host_link_state = HLS_GOING_UP;
  9541. break;
  9542. case HLS_GOING_OFFLINE: /* transient within goto_offline() */
  9543. case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
  9544. default:
  9545. dd_dev_info(dd, "%s: state 0x%x: not supported\n",
  9546. __func__, state);
  9547. ret = -EINVAL;
  9548. break;
  9549. }
  9550. goto done;
  9551. unexpected:
  9552. dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
  9553. __func__, link_state_name(ppd->host_link_state),
  9554. link_state_name(state));
  9555. ret = -EINVAL;
  9556. done:
  9557. mutex_unlock(&ppd->hls_lock);
  9558. if (event.device)
  9559. ib_dispatch_event(&event);
  9560. return ret;
  9561. }
  9562. int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
  9563. {
  9564. u64 reg;
  9565. int ret = 0;
  9566. switch (which) {
  9567. case HFI1_IB_CFG_LIDLMC:
  9568. set_lidlmc(ppd);
  9569. break;
  9570. case HFI1_IB_CFG_VL_HIGH_LIMIT:
  9571. /*
  9572. * The VL Arbitrator high limit is sent in units of 4k
  9573. * bytes, while HFI stores it in units of 64 bytes.
  9574. */
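/* e.g. an FM value of 2 (2 * 4 KB = 8 KB) becomes 2 * 64 = 128 units of 64 B */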
  9575. val *= 4096 / 64;
  9576. reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
  9577. << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
  9578. write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
  9579. break;
  9580. case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
  9581. /* HFI only supports POLL as the default link down state */
  9582. if (val != HLS_DN_POLL)
  9583. ret = -EINVAL;
  9584. break;
  9585. case HFI1_IB_CFG_OP_VLS:
  9586. if (ppd->vls_operational != val) {
  9587. ppd->vls_operational = val;
  9588. if (!ppd->port)
  9589. ret = -EINVAL;
  9590. }
  9591. break;
  9592. /*
  9593. * For link width, link width downgrade, and speed enable, always AND
  9594. * the setting with what is actually supported. This has two benefits.
  9595. * First, enabled can't have unsupported values, no matter what the
  9596. * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
  9597. * "fill in with your supported value" have all the bits in the
  9598. * field set, so simply ANDing with supported has the desired result.
  9599. */
  9600. case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
  9601. ppd->link_width_enabled = val & ppd->link_width_supported;
  9602. break;
  9603. case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
  9604. ppd->link_width_downgrade_enabled =
  9605. val & ppd->link_width_downgrade_supported;
  9606. break;
  9607. case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
  9608. ppd->link_speed_enabled = val & ppd->link_speed_supported;
  9609. break;
  9610. case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
  9611. /*
  9612. * HFI does not follow IB specs, save this value
  9613. * so we can report it, if asked.
  9614. */
  9615. ppd->overrun_threshold = val;
  9616. break;
  9617. case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
  9618. /*
  9619. * HFI does not follow IB specs, save this value
  9620. * so we can report it, if asked.
  9621. */
  9622. ppd->phy_error_threshold = val;
  9623. break;
  9624. case HFI1_IB_CFG_MTU:
  9625. set_send_length(ppd);
  9626. break;
  9627. case HFI1_IB_CFG_PKEYS:
  9628. if (HFI1_CAP_IS_KSET(PKEY_CHECK))
  9629. set_partition_keys(ppd);
  9630. break;
  9631. default:
  9632. if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
  9633. dd_dev_info(ppd->dd,
  9634. "%s: which %s, val 0x%x: not implemented\n",
  9635. __func__, ib_cfg_name(which), val);
  9636. break;
  9637. }
  9638. return ret;
  9639. }
/* begin functions related to vl arbitration table caching */
static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
{
	int i;

	BUILD_BUG_ON(VL_ARB_TABLE_SIZE != VL_ARB_LOW_PRIO_TABLE_SIZE);
	BUILD_BUG_ON(VL_ARB_TABLE_SIZE != VL_ARB_HIGH_PRIO_TABLE_SIZE);

	/*
	 * Note that we always return values directly from the
	 * 'vl_arb_cache' (and do no CSR reads) in response to a
	 * 'Get(VLArbTable)'. This is obviously correct after a
	 * 'Set(VLArbTable)', since the cache will then be up to
	 * date. But it's also correct prior to any 'Set(VLArbTable)'
	 * since then both the cache, and the relevant h/w registers
	 * will be zeroed.
	 */
	for (i = 0; i < MAX_PRIO_TABLE; i++)
		spin_lock_init(&ppd->vl_arb_cache[i].lock);
}

/*
 * vl_arb_lock_cache
 *
 * All other vl_arb_* functions should be called only after locking
 * the cache.
 */
static inline struct vl_arb_cache *
vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
{
	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
		return NULL;
	spin_lock(&ppd->vl_arb_cache[idx].lock);
	return &ppd->vl_arb_cache[idx];
}

static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
{
	spin_unlock(&ppd->vl_arb_cache[idx].lock);
}

static void vl_arb_get_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static void vl_arb_set_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static int vl_arb_match_cache(struct vl_arb_cache *cache,
			      struct ib_vl_weight_elem *vl)
{
	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

/* end functions related to vl arbitration table caching */
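/*
 * The caches above back the FM_TBL_VL_HIGH_ARB and FM_TBL_VL_LOW_ARB cases
 * of fm_get_table()/fm_set_table() below: a Get() is answered from the
 * cache, and a Set() only touches the hardware (via set_vl_weights()) when
 * the new table differs from the cached one.
 */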
static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
			  u32 size, struct ib_vl_weight_elem *vl)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	unsigned int i, is_up = 0;
	int drain, ret = 0;

	mutex_lock(&ppd->hls_lock);

	if (ppd->host_link_state & HLS_UP)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * Before adjusting VL arbitration weights, empty per-VL
		 * FIFOs, otherwise a packet whose VL weight is being
		 * set to 0 could get stuck in a FIFO with no chance to
		 * egress.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(
			dd,
			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
			__func__);
		goto err;
	}

	for (i = 0; i < size; i++, vl++) {
		/*
		 * NOTE: The low priority shift and mask are used here, but
		 * they are the same for both the low and high registers.
		 */
		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
		      | (((u64)vl->weight
				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
		write_csr(dd, target + (i * 8), reg);
	}
	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}
  9739. /*
  9740. * Read one credit merge VL register.
  9741. */
  9742. static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
  9743. struct vl_limit *vll)
  9744. {
  9745. u64 reg = read_csr(dd, csr);
  9746. vll->dedicated = cpu_to_be16(
  9747. (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
  9748. & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
  9749. vll->shared = cpu_to_be16(
  9750. (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
  9751. & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
  9752. }
  9753. /*
  9754. * Read the current credit merge limits.
  9755. */
  9756. static int get_buffer_control(struct hfi1_devdata *dd,
  9757. struct buffer_control *bc, u16 *overall_limit)
  9758. {
  9759. u64 reg;
  9760. int i;
  9761. /* not all entries are filled in */
  9762. memset(bc, 0, sizeof(*bc));
  9763. /* OPA and HFI have a 1-1 mapping */
  9764. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  9765. read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
  9766. /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
  9767. read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
  9768. reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
  9769. bc->overall_shared_limit = cpu_to_be16(
  9770. (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
  9771. & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
  9772. if (overall_limit)
  9773. *overall_limit = (reg
  9774. >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
  9775. & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
  9776. return sizeof(struct buffer_control);
  9777. }
static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
	u64 reg;
	int i;

	/* each register contains 16 SC->VLnt mappings, 4 bits each */
	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[2 * i] = byte & 0xf;
		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
	}

	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[16 + (2 * i)] = byte & 0xf;
		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
	}
	return sizeof(struct sc2vlnt);
}

static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
			      struct ib_vl_weight_elem *vl)
{
	unsigned int i;

	for (i = 0; i < nelems; i++, vl++) {
		vl->vl = 0xf;
		vl->weight = 0;
	}
}
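/*
 * A VL of 0xf with a weight of 0 marks an unused preemption entry, so the
 * table reported above is entirely empty, i.e. no VL arbitration preemption
 * elements are populated.
 */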
  9806. static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
  9807. {
  9808. write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
  9809. DC_SC_VL_VAL(15_0,
  9810. 0, dp->vlnt[0] & 0xf,
  9811. 1, dp->vlnt[1] & 0xf,
  9812. 2, dp->vlnt[2] & 0xf,
  9813. 3, dp->vlnt[3] & 0xf,
  9814. 4, dp->vlnt[4] & 0xf,
  9815. 5, dp->vlnt[5] & 0xf,
  9816. 6, dp->vlnt[6] & 0xf,
  9817. 7, dp->vlnt[7] & 0xf,
  9818. 8, dp->vlnt[8] & 0xf,
  9819. 9, dp->vlnt[9] & 0xf,
  9820. 10, dp->vlnt[10] & 0xf,
  9821. 11, dp->vlnt[11] & 0xf,
  9822. 12, dp->vlnt[12] & 0xf,
  9823. 13, dp->vlnt[13] & 0xf,
  9824. 14, dp->vlnt[14] & 0xf,
  9825. 15, dp->vlnt[15] & 0xf));
  9826. write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
  9827. DC_SC_VL_VAL(31_16,
  9828. 16, dp->vlnt[16] & 0xf,
  9829. 17, dp->vlnt[17] & 0xf,
  9830. 18, dp->vlnt[18] & 0xf,
  9831. 19, dp->vlnt[19] & 0xf,
  9832. 20, dp->vlnt[20] & 0xf,
  9833. 21, dp->vlnt[21] & 0xf,
  9834. 22, dp->vlnt[22] & 0xf,
  9835. 23, dp->vlnt[23] & 0xf,
  9836. 24, dp->vlnt[24] & 0xf,
  9837. 25, dp->vlnt[25] & 0xf,
  9838. 26, dp->vlnt[26] & 0xf,
  9839. 27, dp->vlnt[27] & 0xf,
  9840. 28, dp->vlnt[28] & 0xf,
  9841. 29, dp->vlnt[29] & 0xf,
  9842. 30, dp->vlnt[30] & 0xf,
  9843. 31, dp->vlnt[31] & 0xf));
  9844. }
static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
			u16 limit)
{
	if (limit != 0)
		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
			    what, (int)limit, idx);
}

/* change only the shared limit portion of SendCmGlobalCredit */
static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
{
	u64 reg;

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}

/* change only the total credit limit portion of SendCmGlobalCredit */
static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
{
	u64 reg;

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}

/* set the given per-VL shared limit */
static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
{
	u64 reg;
	u32 addr;

	if (vl < TXE_NUM_DATA_VL)
		addr = SEND_CM_CREDIT_VL + (8 * vl);
	else
		addr = SEND_CM_CREDIT_VL15;

	reg = read_csr(dd, addr);
	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
	write_csr(dd, addr, reg);
}

/* set the given per-VL dedicated limit */
static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
{
	u64 reg;
	u32 addr;

	if (vl < TXE_NUM_DATA_VL)
		addr = SEND_CM_CREDIT_VL + (8 * vl);
	else
		addr = SEND_CM_CREDIT_VL15;

	reg = read_csr(dd, addr);
	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
	write_csr(dd, addr, reg);
}
/* spin until the given per-VL status mask bits clear */
static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
				     const char *which)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
	while (1) {
		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;

		if (reg == 0)
			return;	/* success */
		if (time_after(jiffies, timeout))
			break;	/* timed out */
		udelay(1);
	}

	dd_dev_err(dd,
		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
	/*
	 * If this occurs, it is likely there was a credit loss on the link.
	 * The only recovery from that is a link bounce.
	 */
	dd_dev_err(dd,
		   "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
}
/*
 * The number of credits on the VLs may be changed while everything
 * is "live", but the following algorithm must be followed due to
 * how the hardware is actually implemented. In particular,
 * Return_Credit_Status[] is the only correct status check.
 *
 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
 *     set Global_Shared_Credit_Limit = 0
 *     use_all_vl = 1
 * mask0 = all VLs that are changing either dedicated or shared limits
 * set Shared_Limit[mask0] = 0
 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
 * if (changing any dedicated limit)
 *     mask1 = all VLs that are lowering dedicated limits
 *     lower Dedicated_Limit[mask1]
 *     spin until Return_Credit_Status[mask1] == 0
 * raise Dedicated_Limits
 * raise Shared_Limits
 * raise Global_Shared_Credit_Limit
 *
 * lower = if the new limit is lower, set the limit to the new value
 * raise = if the new limit is higher than the current value (may be changed
 *	earlier in the algorithm), set the new limit to the new value
 */
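/*
 * In the implementation below the pseudocode maps onto:
 *   Global_Shared_Credit_Limit -> set_global_shared()
 *   Shared_Limit[vl]           -> set_vl_shared()
 *   Dedicated_Limit[vl]        -> set_vl_dedicated()
 *   Return_Credit_Status[]     -> SEND_CM_CREDIT_USED_STATUS, polled by
 *                                 wait_for_vl_status_clear()
 */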
  9947. int set_buffer_control(struct hfi1_pportdata *ppd,
  9948. struct buffer_control *new_bc)
  9949. {
  9950. struct hfi1_devdata *dd = ppd->dd;
  9951. u64 changing_mask, ld_mask, stat_mask;
  9952. int change_count;
  9953. int i, use_all_mask;
  9954. int this_shared_changing;
  9955. int vl_count = 0, ret;
  9956. /*
  9957. * A0: add the variable any_shared_limit_changing below and in the
  9958. * algorithm above. If removing A0 support, it can be removed.
  9959. */
  9960. int any_shared_limit_changing;
  9961. struct buffer_control cur_bc;
  9962. u8 changing[OPA_MAX_VLS];
  9963. u8 lowering_dedicated[OPA_MAX_VLS];
  9964. u16 cur_total;
  9965. u32 new_total = 0;
  9966. const u64 all_mask =
  9967. SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
  9968. | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
  9969. | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
  9970. | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
  9971. | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
  9972. | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
  9973. | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
  9974. | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
  9975. | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
  9976. #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
  9977. #define NUM_USABLE_VLS 16 /* look at VL15 and less */
  9978. /* find the new total credits, do sanity check on unused VLs */
  9979. for (i = 0; i < OPA_MAX_VLS; i++) {
  9980. if (valid_vl(i)) {
  9981. new_total += be16_to_cpu(new_bc->vl[i].dedicated);
  9982. continue;
  9983. }
  9984. nonzero_msg(dd, i, "dedicated",
  9985. be16_to_cpu(new_bc->vl[i].dedicated));
  9986. nonzero_msg(dd, i, "shared",
  9987. be16_to_cpu(new_bc->vl[i].shared));
  9988. new_bc->vl[i].dedicated = 0;
  9989. new_bc->vl[i].shared = 0;
  9990. }
  9991. new_total += be16_to_cpu(new_bc->overall_shared_limit);
  9992. /* fetch the current values */
  9993. get_buffer_control(dd, &cur_bc, &cur_total);
  9994. /*
  9995. * Create the masks we will use.
  9996. */
  9997. memset(changing, 0, sizeof(changing));
  9998. memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
  9999. /*
  10000. * NOTE: Assumes that the individual VL bits are adjacent and in
  10001. * increasing order
  10002. */
  10003. stat_mask =
  10004. SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
  10005. changing_mask = 0;
  10006. ld_mask = 0;
  10007. change_count = 0;
  10008. any_shared_limit_changing = 0;
  10009. for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
  10010. if (!valid_vl(i))
  10011. continue;
  10012. this_shared_changing = new_bc->vl[i].shared
  10013. != cur_bc.vl[i].shared;
  10014. if (this_shared_changing)
  10015. any_shared_limit_changing = 1;
  10016. if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
  10017. this_shared_changing) {
  10018. changing[i] = 1;
  10019. changing_mask |= stat_mask;
  10020. change_count++;
  10021. }
  10022. if (be16_to_cpu(new_bc->vl[i].dedicated) <
  10023. be16_to_cpu(cur_bc.vl[i].dedicated)) {
  10024. lowering_dedicated[i] = 1;
  10025. ld_mask |= stat_mask;
  10026. }
  10027. }
  10028. /* bracket the credit change with a total adjustment */
  10029. if (new_total > cur_total)
  10030. set_global_limit(dd, new_total);
  10031. /*
  10032. * Start the credit change algorithm.
  10033. */
  10034. use_all_mask = 0;
  10035. if ((be16_to_cpu(new_bc->overall_shared_limit) <
  10036. be16_to_cpu(cur_bc.overall_shared_limit)) ||
  10037. (is_ax(dd) && any_shared_limit_changing)) {
  10038. set_global_shared(dd, 0);
  10039. cur_bc.overall_shared_limit = 0;
  10040. use_all_mask = 1;
  10041. }
  10042. for (i = 0; i < NUM_USABLE_VLS; i++) {
  10043. if (!valid_vl(i))
  10044. continue;
  10045. if (changing[i]) {
  10046. set_vl_shared(dd, i, 0);
  10047. cur_bc.vl[i].shared = 0;
  10048. }
  10049. }
  10050. wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
  10051. "shared");
  10052. if (change_count > 0) {
  10053. for (i = 0; i < NUM_USABLE_VLS; i++) {
  10054. if (!valid_vl(i))
  10055. continue;
  10056. if (lowering_dedicated[i]) {
  10057. set_vl_dedicated(dd, i,
  10058. be16_to_cpu(new_bc->
  10059. vl[i].dedicated));
  10060. cur_bc.vl[i].dedicated =
  10061. new_bc->vl[i].dedicated;
  10062. }
  10063. }
  10064. wait_for_vl_status_clear(dd, ld_mask, "dedicated");
  10065. /* now raise all dedicated that are going up */
  10066. for (i = 0; i < NUM_USABLE_VLS; i++) {
  10067. if (!valid_vl(i))
  10068. continue;
  10069. if (be16_to_cpu(new_bc->vl[i].dedicated) >
  10070. be16_to_cpu(cur_bc.vl[i].dedicated))
  10071. set_vl_dedicated(dd, i,
  10072. be16_to_cpu(new_bc->
  10073. vl[i].dedicated));
  10074. }
  10075. }
  10076. /* next raise all shared that are going up */
  10077. for (i = 0; i < NUM_USABLE_VLS; i++) {
  10078. if (!valid_vl(i))
  10079. continue;
  10080. if (be16_to_cpu(new_bc->vl[i].shared) >
  10081. be16_to_cpu(cur_bc.vl[i].shared))
  10082. set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
  10083. }
  10084. /* finally raise the global shared */
  10085. if (be16_to_cpu(new_bc->overall_shared_limit) >
  10086. be16_to_cpu(cur_bc.overall_shared_limit))
  10087. set_global_shared(dd,
  10088. be16_to_cpu(new_bc->overall_shared_limit));
  10089. /* bracket the credit change with a total adjustment */
  10090. if (new_total < cur_total)
  10091. set_global_limit(dd, new_total);
  10092. /*
  10093. * Determine the actual number of operational VLS using the number of
  10094. * dedicated and shared credits for each VL.
  10095. */
  10096. if (change_count > 0) {
  10097. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  10098. if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
  10099. be16_to_cpu(new_bc->vl[i].shared) > 0)
  10100. vl_count++;
  10101. ppd->actual_vls_operational = vl_count;
  10102. ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
  10103. ppd->actual_vls_operational :
  10104. ppd->vls_operational,
  10105. NULL);
  10106. if (ret == 0)
  10107. ret = pio_map_init(dd, ppd->port - 1, vl_count ?
  10108. ppd->actual_vls_operational :
  10109. ppd->vls_operational, NULL);
  10110. if (ret)
  10111. return ret;
  10112. }
  10113. return 0;
  10114. }
  10115. /*
  10116. * Read the given fabric manager table. Return the size of the
  10117. * table (in bytes) on success, and a negative error code on
  10118. * failure.
  10119. */
  10120. int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
  10121. {
  10122. int size;
  10123. struct vl_arb_cache *vlc;
  10124. switch (which) {
  10125. case FM_TBL_VL_HIGH_ARB:
  10126. size = 256;
  10127. /*
  10128. * OPA specifies 128 elements (of 2 bytes each), though
  10129. * HFI supports only 16 elements in h/w.
  10130. */
  10131. vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
  10132. vl_arb_get_cache(vlc, t);
  10133. vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
  10134. break;
  10135. case FM_TBL_VL_LOW_ARB:
  10136. size = 256;
  10137. /*
  10138. * OPA specifies 128 elements (of 2 bytes each), though
  10139. * HFI supports only 16 elements in h/w.
  10140. */
  10141. vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
  10142. vl_arb_get_cache(vlc, t);
  10143. vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
  10144. break;
  10145. case FM_TBL_BUFFER_CONTROL:
  10146. size = get_buffer_control(ppd->dd, t, NULL);
  10147. break;
  10148. case FM_TBL_SC2VLNT:
  10149. size = get_sc2vlnt(ppd->dd, t);
  10150. break;
  10151. case FM_TBL_VL_PREEMPT_ELEMS:
  10152. size = 256;
  10153. /* OPA specifies 128 elements, of 2 bytes each */
  10154. get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
  10155. break;
  10156. case FM_TBL_VL_PREEMPT_MATRIX:
  10157. size = 256;
  10158. /*
  10159. * OPA specifies that this is the same size as the VL
  10160. * arbitration tables (i.e., 256 bytes).
  10161. */
  10162. break;
  10163. default:
  10164. return -EINVAL;
  10165. }
  10166. return size;
  10167. }
  10168. /*
  10169. * Write the given fabric manager table.
  10170. */
  10171. int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
  10172. {
  10173. int ret = 0;
  10174. struct vl_arb_cache *vlc;
  10175. switch (which) {
  10176. case FM_TBL_VL_HIGH_ARB:
  10177. vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
  10178. if (vl_arb_match_cache(vlc, t)) {
  10179. vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
  10180. break;
  10181. }
  10182. vl_arb_set_cache(vlc, t);
  10183. vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
  10184. ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
  10185. VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
  10186. break;
  10187. case FM_TBL_VL_LOW_ARB:
  10188. vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
  10189. if (vl_arb_match_cache(vlc, t)) {
  10190. vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
  10191. break;
  10192. }
  10193. vl_arb_set_cache(vlc, t);
  10194. vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
  10195. ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
  10196. VL_ARB_LOW_PRIO_TABLE_SIZE, t);
  10197. break;
  10198. case FM_TBL_BUFFER_CONTROL:
  10199. ret = set_buffer_control(ppd, t);
  10200. break;
  10201. case FM_TBL_SC2VLNT:
  10202. set_sc2vlnt(ppd->dd, t);
  10203. break;
  10204. default:
  10205. ret = -EINVAL;
  10206. }
  10207. return ret;
  10208. }
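/*
 * Note that the VL arbitration cases above consult the cache first, so an
 * unchanged Set(VLArbTable) returns without calling set_vl_weights() and
 * thereby avoids a needless stop/drain of the data VLs while the link is up.
 */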
/*
 * Disable all data VLs.
 *
 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
 */
static int disable_data_vls(struct hfi1_devdata *dd)
{
	if (is_ax(dd))
		return 1;

	pio_send_control(dd, PSC_DATA_VL_DISABLE);

	return 0;
}

/*
 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
 * Just re-enables all data VLs (the "fill" part happens
 * automatically - the name was chosen for symmetry with
 * stop_drain_data_vls()).
 *
 * Return 0 if successful, non-zero if the VLs cannot be enabled.
 */
int open_fill_data_vls(struct hfi1_devdata *dd)
{
	if (is_ax(dd))
		return 1;

	pio_send_control(dd, PSC_DATA_VL_ENABLE);

	return 0;
}

/*
 * drain_data_vls() - assumes that disable_data_vls() has been called,
 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
 * engines to drop to 0.
 */
static void drain_data_vls(struct hfi1_devdata *dd)
{
	sc_wait(dd);
	sdma_wait(dd);
	pause_for_credit_return(dd);
}

/*
 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
 *
 * Use open_fill_data_vls() to resume using data VLs. This pair is
 * meant to be used like this:
 *
 * stop_drain_data_vls(dd);
 * // do things with per-VL resources
 * open_fill_data_vls(dd);
 */
int stop_drain_data_vls(struct hfi1_devdata *dd)
{
	int ret;

	ret = disable_data_vls(dd);
	if (ret == 0)
		drain_data_vls(dd);

	return ret;
}
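/*
 * set_vl_weights() above is one user of this pair: it stops and drains the
 * data VLs before rewriting the VL arbitration weights, then reopens them.
 */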
/*
 * Convert a nanosecond time to a cclock count. No matter how slow
 * the cclock, a non-zero ns will always have a non-zero result.
 */
u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
{
	u32 cclocks;

	if (dd->icode == ICODE_FPGA_EMULATION)
		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
	else  /* simulation pretends to be ASIC */
		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
	if (ns && !cclocks)	/* if ns nonzero, must be at least 1 */
		cclocks = 1;
	return cclocks;
}

/*
 * Convert a cclock count to nanoseconds. No matter how slow
 * the cclock, a non-zero cclocks will always have a non-zero result.
 */
u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
{
	u32 ns;

	if (dd->icode == ICODE_FPGA_EMULATION)
		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
	else  /* simulation pretends to be ASIC */
		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
	if (cclocks && !ns)
		ns = 1;
	return ns;
}
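/*
 * Both conversions truncate and then clamp to a minimum of 1, so a non-zero
 * input never rounds down to zero in either direction.
 */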
  10295. /*
  10296. * Dynamically adjust the receive interrupt timeout for a context based on
  10297. * incoming packet rate.
  10298. *
  10299. * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
  10300. */
  10301. static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
  10302. {
  10303. struct hfi1_devdata *dd = rcd->dd;
  10304. u32 timeout = rcd->rcvavail_timeout;
/*
 * This algorithm doubles or halves the timeout depending on whether
 * the number of packets received in this interrupt was less than, or
 * greater than or equal to, the interrupt count.
 *
 * The calculations below do not allow a steady state to be achieved.
 * Only at the endpoints is it possible to have an unchanging timeout.
 */
  10314. if (npkts < rcv_intr_count) {
  10315. /*
  10316. * Not enough packets arrived before the timeout, adjust
  10317. * timeout downward.
  10318. */
  10319. if (timeout < 2) /* already at minimum? */
  10320. return;
  10321. timeout >>= 1;
  10322. } else {
  10323. /*
  10324. * More than enough packets arrived before the timeout, adjust
  10325. * timeout upward.
  10326. */
  10327. if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
  10328. return;
  10329. timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
  10330. }
  10331. rcd->rcvavail_timeout = timeout;
  10332. /*
  10333. * timeout cannot be larger than rcv_intr_timeout_csr which has already
  10334. * been verified to be in range
  10335. */
  10336. write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
  10337. (u64)timeout <<
  10338. RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
  10339. }
  10340. void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
  10341. u32 intr_adjust, u32 npkts)
  10342. {
  10343. struct hfi1_devdata *dd = rcd->dd;
  10344. u64 reg;
  10345. u32 ctxt = rcd->ctxt;
  10346. /*
  10347. * Need to write timeout register before updating RcvHdrHead to ensure
  10348. * that a new value is used when the HW decides to restart counting.
  10349. */
  10350. if (intr_adjust)
  10351. adjust_rcv_timeout(rcd, npkts);
  10352. if (updegr) {
  10353. reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
  10354. << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
  10355. write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
  10356. }
  10357. mmiowb();
  10358. reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
  10359. (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
  10360. << RCV_HDR_HEAD_HEAD_SHIFT);
  10361. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
  10362. mmiowb();
  10363. }
u32 hdrqempty(struct hfi1_ctxtdata *rcd)
{
	u32 head, tail;

	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;

	if (rcd->rcvhdrtail_kvaddr)
		tail = get_rcvhdrtail(rcd);
	else
		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);

	return head == tail;
}

/*
 * Context Control and Receive Array encoding for buffer size:
 *	0x0 invalid
 *	0x1   4 KB
 *	0x2   8 KB
 *	0x3  16 KB
 *	0x4  32 KB
 *	0x5  64 KB
 *	0x6 128 KB
 *	0x7 256 KB
 *	0x8 512 KB (Receive Array only)
 *	0x9   1 MB (Receive Array only)
 *	0xa   2 MB (Receive Array only)
 *
 *	0xB-0xF - reserved (Receive Array only)
 *
 * This routine assumes that the value has already been sanity checked.
 */
static u32 encoded_size(u32 size)
{
	switch (size) {
	case   4 * 1024: return 0x1;
	case   8 * 1024: return 0x2;
	case  16 * 1024: return 0x3;
	case  32 * 1024: return 0x4;
	case  64 * 1024: return 0x5;
	case 128 * 1024: return 0x6;
	case 256 * 1024: return 0x7;
	case 512 * 1024: return 0x8;
	case   1 * 1024 * 1024: return 0x9;
	case   2 * 1024 * 1024: return 0xa;
	}
	return 0x1;	/* if invalid, go with the minimum size */
}
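/*
 * hfi1_rcvctrl() below uses this encoding when it programs
 * RcvCtxtCtrl.EgrBufSize from rcd->egrbufs.rcvtid_size while enabling a
 * receive context.
 */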
  10410. void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
  10411. struct hfi1_ctxtdata *rcd)
  10412. {
  10413. u64 rcvctrl, reg;
  10414. int did_enable = 0;
  10415. u16 ctxt;
  10416. if (!rcd)
  10417. return;
  10418. ctxt = rcd->ctxt;
  10419. hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
  10420. rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
  10421. /* if the context already enabled, don't do the extra steps */
  10422. if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
  10423. !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
  10424. /* reset the tail and hdr addresses, and sequence count */
  10425. write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
  10426. rcd->rcvhdrq_dma);
  10427. if (rcd->rcvhdrtail_kvaddr)
  10428. write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
  10429. rcd->rcvhdrqtailaddr_dma);
  10430. rcd->seq_cnt = 1;
  10431. /* reset the cached receive header queue head value */
  10432. rcd->head = 0;
  10433. /*
  10434. * Zero the receive header queue so we don't get false
  10435. * positives when checking the sequence number. The
  10436. * sequence numbers could land exactly on the same spot.
  10437. * E.g. a rcd restart before the receive header wrapped.
  10438. */
  10439. memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
  10440. /* starting timeout */
  10441. rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
  10442. /* enable the context */
  10443. rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
  10444. /* clean the egr buffer size first */
  10445. rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
  10446. rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
  10447. & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
  10448. << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
  10449. /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
  10450. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
  10451. did_enable = 1;
  10452. /* zero RcvEgrIndexHead */
  10453. write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
  10454. /* set eager count and base index */
  10455. reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
  10456. & RCV_EGR_CTRL_EGR_CNT_MASK)
  10457. << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
  10458. (((rcd->eager_base >> RCV_SHIFT)
  10459. & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
  10460. << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
  10461. write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
  10462. /*
  10463. * Set TID (expected) count and base index.
  10464. * rcd->expected_count is set to individual RcvArray entries,
  10465. * not pairs, and the CSR takes a pair-count in groups of
  10466. * four, so divide by 8.
  10467. */
  10468. reg = (((rcd->expected_count >> RCV_SHIFT)
  10469. & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
  10470. << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
  10471. (((rcd->expected_base >> RCV_SHIFT)
  10472. & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
  10473. << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
  10474. write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
  10475. if (ctxt == HFI1_CTRL_CTXT)
  10476. write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
  10477. }
  10478. if (op & HFI1_RCVCTRL_CTXT_DIS) {
  10479. write_csr(dd, RCV_VL15, 0);
  10480. /*
  10481. * When receive context is being disabled turn on tail
  10482. * update with a dummy tail address and then disable
  10483. * receive context.
  10484. */
  10485. if (dd->rcvhdrtail_dummy_dma) {
  10486. write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
  10487. dd->rcvhdrtail_dummy_dma);
  10488. /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
  10489. rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
  10490. }
  10491. rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
  10492. }
  10493. if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
  10494. rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
  10495. if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
  10496. rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
  10497. if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
  10498. rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
  10499. if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
  10500. /* See comment on RcvCtxtCtrl.TailUpd above */
  10501. if (!(op & HFI1_RCVCTRL_CTXT_DIS))
  10502. rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
  10503. }
  10504. if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
  10505. rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
  10506. if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
  10507. rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
  10508. if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
  10509. /*
  10510. * In one-packet-per-eager mode, the size comes from
  10511. * the RcvArray entry.
  10512. */
  10513. rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
  10514. rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
  10515. }
  10516. if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
  10517. rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
  10518. if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
  10519. rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
  10520. if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
  10521. rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
  10522. if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
  10523. rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
  10524. if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
  10525. rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
  10526. hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
  10527. write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
  10528. /* work around sticky RcvCtxtStatus.BlockedRHQFull */
  10529. if (did_enable &&
  10530. (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
  10531. reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
  10532. if (reg != 0) {
  10533. dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
  10534. ctxt, reg);
  10535. read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
  10536. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
  10537. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
  10538. read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
  10539. reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
  10540. dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
  10541. ctxt, reg, reg == 0 ? "not" : "still");
  10542. }
  10543. }
  10544. if (did_enable) {
  10545. /*
  10546. * The interrupt timeout and count must be set after
  10547. * the context is enabled to take effect.
  10548. */
  10549. /* set interrupt timeout */
  10550. write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
  10551. (u64)rcd->rcvavail_timeout <<
  10552. RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
  10553. /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
  10554. reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
  10555. write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
  10556. }
  10557. if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
  10558. /*
  10559. * If the context has been disabled and the Tail Update has
  10560. * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
  10561. * so it doesn't contain an address that is invalid.
  10562. */
  10563. write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
  10564. dd->rcvhdrtail_dummy_dma);
  10565. }
  10566. u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
  10567. {
  10568. int ret;
  10569. u64 val = 0;
  10570. if (namep) {
  10571. ret = dd->cntrnameslen;
  10572. *namep = dd->cntrnames;
  10573. } else {
  10574. const struct cntr_entry *entry;
  10575. int i, j;
  10576. ret = (dd->ndevcntrs) * sizeof(u64);
  10577. /* Get the start of the block of counters */
  10578. *cntrp = dd->cntrs;
  10579. /*
  10580. * Now go and fill in each counter in the block.
  10581. */
  10582. for (i = 0; i < DEV_CNTR_LAST; i++) {
  10583. entry = &dev_cntrs[i];
  10584. hfi1_cdbg(CNTR, "reading %s", entry->name);
  10585. if (entry->flags & CNTR_DISABLED) {
  10586. /* Nothing */
  10587. hfi1_cdbg(CNTR, "\tDisabled\n");
  10588. } else {
  10589. if (entry->flags & CNTR_VL) {
  10590. hfi1_cdbg(CNTR, "\tPer VL\n");
  10591. for (j = 0; j < C_VL_COUNT; j++) {
  10592. val = entry->rw_cntr(entry,
  10593. dd, j,
  10594. CNTR_MODE_R,
  10595. 0);
  10596. hfi1_cdbg(
  10597. CNTR,
  10598. "\t\tRead 0x%llx for %d\n",
  10599. val, j);
  10600. dd->cntrs[entry->offset + j] =
  10601. val;
  10602. }
  10603. } else if (entry->flags & CNTR_SDMA) {
  10604. hfi1_cdbg(CNTR,
  10605. "\t Per SDMA Engine\n");
  10606. for (j = 0; j < chip_sdma_engines(dd);
  10607. j++) {
  10608. val =
  10609. entry->rw_cntr(entry, dd, j,
  10610. CNTR_MODE_R, 0);
  10611. hfi1_cdbg(CNTR,
  10612. "\t\tRead 0x%llx for %d\n",
  10613. val, j);
  10614. dd->cntrs[entry->offset + j] =
  10615. val;
  10616. }
  10617. } else {
  10618. val = entry->rw_cntr(entry, dd,
  10619. CNTR_INVALID_VL,
  10620. CNTR_MODE_R, 0);
  10621. dd->cntrs[entry->offset] = val;
  10622. hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
  10623. }
  10624. }
  10625. }
  10626. }
  10627. return ret;
  10628. }
  10629. /*
  10630. * Used by sysfs to create files for hfi stats to read
  10631. */
  10632. u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
  10633. {
  10634. int ret;
  10635. u64 val = 0;
  10636. if (namep) {
  10637. ret = ppd->dd->portcntrnameslen;
  10638. *namep = ppd->dd->portcntrnames;
  10639. } else {
  10640. const struct cntr_entry *entry;
  10641. int i, j;
  10642. ret = ppd->dd->nportcntrs * sizeof(u64);
  10643. *cntrp = ppd->cntrs;
  10644. for (i = 0; i < PORT_CNTR_LAST; i++) {
  10645. entry = &port_cntrs[i];
  10646. hfi1_cdbg(CNTR, "reading %s", entry->name);
  10647. if (entry->flags & CNTR_DISABLED) {
  10648. /* Nothing */
  10649. hfi1_cdbg(CNTR, "\tDisabled\n");
  10650. continue;
  10651. }
  10652. if (entry->flags & CNTR_VL) {
  10653. hfi1_cdbg(CNTR, "\tPer VL");
  10654. for (j = 0; j < C_VL_COUNT; j++) {
  10655. val = entry->rw_cntr(entry, ppd, j,
  10656. CNTR_MODE_R,
  10657. 0);
  10658. hfi1_cdbg(
  10659. CNTR,
  10660. "\t\tRead 0x%llx for %d",
  10661. val, j);
  10662. ppd->cntrs[entry->offset + j] = val;
  10663. }
  10664. } else {
  10665. val = entry->rw_cntr(entry, ppd,
  10666. CNTR_INVALID_VL,
  10667. CNTR_MODE_R,
  10668. 0);
  10669. ppd->cntrs[entry->offset] = val;
  10670. hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
  10671. }
  10672. }
  10673. }
  10674. return ret;
  10675. }
  10676. static void free_cntrs(struct hfi1_devdata *dd)
  10677. {
  10678. struct hfi1_pportdata *ppd;
  10679. int i;
  10680. if (dd->synth_stats_timer.function)
  10681. del_timer_sync(&dd->synth_stats_timer);
  10682. ppd = (struct hfi1_pportdata *)(dd + 1);
  10683. for (i = 0; i < dd->num_pports; i++, ppd++) {
  10684. kfree(ppd->cntrs);
  10685. kfree(ppd->scntrs);
  10686. free_percpu(ppd->ibport_data.rvp.rc_acks);
  10687. free_percpu(ppd->ibport_data.rvp.rc_qacks);
  10688. free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
  10689. ppd->cntrs = NULL;
  10690. ppd->scntrs = NULL;
  10691. ppd->ibport_data.rvp.rc_acks = NULL;
  10692. ppd->ibport_data.rvp.rc_qacks = NULL;
  10693. ppd->ibport_data.rvp.rc_delayed_comp = NULL;
  10694. }
  10695. kfree(dd->portcntrnames);
  10696. dd->portcntrnames = NULL;
  10697. kfree(dd->cntrs);
  10698. dd->cntrs = NULL;
  10699. kfree(dd->scntrs);
  10700. dd->scntrs = NULL;
  10701. kfree(dd->cntrnames);
  10702. dd->cntrnames = NULL;
  10703. if (dd->update_cntr_wq) {
  10704. destroy_workqueue(dd->update_cntr_wq);
  10705. dd->update_cntr_wq = NULL;
  10706. }
  10707. }
  10708. static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
  10709. u64 *psval, void *context, int vl)
  10710. {
  10711. u64 val;
  10712. u64 sval = *psval;
  10713. if (entry->flags & CNTR_DISABLED) {
  10714. dd_dev_err(dd, "Counter %s not enabled", entry->name);
  10715. return 0;
  10716. }
  10717. hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
  10718. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
  10719. /* If its a synthetic counter there is more work we need to do */
  10720. if (entry->flags & CNTR_SYNTH) {
  10721. if (sval == CNTR_MAX) {
  10722. /* No need to read already saturated */
  10723. return CNTR_MAX;
  10724. }
  10725. if (entry->flags & CNTR_32BIT) {
  10726. /* 32bit counters can wrap multiple times */
  10727. u64 upper = sval >> 32;
  10728. u64 lower = (sval << 32) >> 32;
  10729. if (lower > val) { /* hw wrapped */
  10730. if (upper == CNTR_32BIT_MAX)
  10731. val = CNTR_MAX;
  10732. else
  10733. upper++;
  10734. }
  10735. if (val != CNTR_MAX)
  10736. val = (upper << 32) | val;
  10737. } else {
  10738. /* If we rolled we are saturated */
  10739. if ((val < sval) || (val > CNTR_MAX))
  10740. val = CNTR_MAX;
  10741. }
  10742. }
  10743. *psval = val;
  10744. hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
  10745. return val;
  10746. }
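/*
 * For CNTR_32BIT counters the software value carries the accumulated upper
 * 32 bits: when the hardware reading drops below the previously seen lower
 * half, one wrap is assumed and the upper half is bumped, saturating at
 * CNTR_MAX.
 */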
  10747. static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
  10748. struct cntr_entry *entry,
  10749. u64 *psval, void *context, int vl, u64 data)
  10750. {
  10751. u64 val;
  10752. if (entry->flags & CNTR_DISABLED) {
  10753. dd_dev_err(dd, "Counter %s not enabled", entry->name);
  10754. return 0;
  10755. }
  10756. hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
  10757. if (entry->flags & CNTR_SYNTH) {
  10758. *psval = data;
  10759. if (entry->flags & CNTR_32BIT) {
  10760. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
  10761. (data << 32) >> 32);
  10762. val = data; /* return the full 64bit value */
  10763. } else {
  10764. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
  10765. data);
  10766. }
  10767. } else {
  10768. val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
  10769. }
  10770. *psval = val;
  10771. hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
  10772. return val;
  10773. }
  10774. u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
  10775. {
  10776. struct cntr_entry *entry;
  10777. u64 *sval;
  10778. entry = &dev_cntrs[index];
  10779. sval = dd->scntrs + entry->offset;
  10780. if (vl != CNTR_INVALID_VL)
  10781. sval += vl;
  10782. return read_dev_port_cntr(dd, entry, sval, dd, vl);
  10783. }
  10784. u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
  10785. {
  10786. struct cntr_entry *entry;
  10787. u64 *sval;
  10788. entry = &dev_cntrs[index];
  10789. sval = dd->scntrs + entry->offset;
  10790. if (vl != CNTR_INVALID_VL)
  10791. sval += vl;
  10792. return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
  10793. }
  10794. u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
  10795. {
  10796. struct cntr_entry *entry;
  10797. u64 *sval;
  10798. entry = &port_cntrs[index];
  10799. sval = ppd->scntrs + entry->offset;
  10800. if (vl != CNTR_INVALID_VL)
  10801. sval += vl;
  10802. if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
  10803. (index <= C_RCV_HDR_OVF_LAST)) {
  10804. /* We do not want to bother for disabled contexts */
  10805. return 0;
  10806. }
  10807. return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
  10808. }
  10809. u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
  10810. {
  10811. struct cntr_entry *entry;
  10812. u64 *sval;
  10813. entry = &port_cntrs[index];
  10814. sval = ppd->scntrs + entry->offset;
  10815. if (vl != CNTR_INVALID_VL)
  10816. sval += vl;
  10817. if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
  10818. (index <= C_RCV_HDR_OVF_LAST)) {
  10819. /* We do not want to bother for disabled contexts */
  10820. return 0;
  10821. }
  10822. return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
  10823. }
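/*
 * do_update_synth_timer() below uses read_dev_cntr()/read_port_cntr() to
 * periodically refresh the 64-bit software copies of the synthesized
 * counters before the 32-bit hardware values can wrap more than once.
 */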
  10824. static void do_update_synth_timer(struct work_struct *work)
  10825. {
  10826. u64 cur_tx;
  10827. u64 cur_rx;
  10828. u64 total_flits;
  10829. u8 update = 0;
  10830. int i, j, vl;
  10831. struct hfi1_pportdata *ppd;
  10832. struct cntr_entry *entry;
  10833. struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
  10834. update_cntr_work);
  10835. /*
  10836. * Rather than keep beating on the CSRs pick a minimal set that we can
  10837. * check to watch for potential roll over. We can do this by looking at
  10838. * the number of flits sent/recv. If the total flits exceeds 32bits then
  10839. * we have to iterate all the counters and update.
  10840. */
  10841. entry = &dev_cntrs[C_DC_RCV_FLITS];
  10842. cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
  10843. entry = &dev_cntrs[C_DC_XMIT_FLITS];
  10844. cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
  10845. hfi1_cdbg(
  10846. CNTR,
  10847. "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
  10848. dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
  10849. if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
  10850. /*
  10851. * May not be strictly necessary to update but it won't hurt and
  10852. * simplifies the logic here.
  10853. */
  10854. update = 1;
  10855. hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
  10856. dd->unit);
  10857. } else {
  10858. total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
  10859. hfi1_cdbg(CNTR,
  10860. "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
  10861. total_flits, (u64)CNTR_32BIT_MAX);
  10862. if (total_flits >= CNTR_32BIT_MAX) {
  10863. hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
  10864. dd->unit);
  10865. update = 1;
  10866. }
  10867. }
  10868. if (update) {
  10869. hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
  10870. for (i = 0; i < DEV_CNTR_LAST; i++) {
  10871. entry = &dev_cntrs[i];
  10872. if (entry->flags & CNTR_VL) {
  10873. for (vl = 0; vl < C_VL_COUNT; vl++)
  10874. read_dev_cntr(dd, i, vl);
  10875. } else {
  10876. read_dev_cntr(dd, i, CNTR_INVALID_VL);
  10877. }
  10878. }
  10879. ppd = (struct hfi1_pportdata *)(dd + 1);
  10880. for (i = 0; i < dd->num_pports; i++, ppd++) {
  10881. for (j = 0; j < PORT_CNTR_LAST; j++) {
  10882. entry = &port_cntrs[j];
  10883. if (entry->flags & CNTR_VL) {
  10884. for (vl = 0; vl < C_VL_COUNT; vl++)
  10885. read_port_cntr(ppd, j, vl);
  10886. } else {
  10887. read_port_cntr(ppd, j, CNTR_INVALID_VL);
  10888. }
  10889. }
  10890. }
  10891. /*
  10892. * We want the value in the register. The goal is to keep track
  10893. * of the number of "ticks" not the counter value. In other
  10894. * words if the register rolls we want to notice it and go ahead
  10895. * and force an update.
  10896. */
  10897. entry = &dev_cntrs[C_DC_XMIT_FLITS];
  10898. dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
  10899. CNTR_MODE_R, 0);
  10900. entry = &dev_cntrs[C_DC_RCV_FLITS];
  10901. dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
  10902. CNTR_MODE_R, 0);
  10903. hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
  10904. dd->unit, dd->last_tx, dd->last_rx);
  10905. } else {
  10906. hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
  10907. }
  10908. }
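/*
 * The timer below only queues do_update_synth_timer() on update_cntr_wq and
 * re-arms itself every SYNTH_CNT_TIME seconds; the CSR reads are deferred to
 * the workqueue rather than done in timer context.
 */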
static void update_synth_timer(struct timer_list *t)
{
	struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);

	queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
#define C_MAX_NAME 16 /* 15 chars + one for '\0' */
  10916. static int init_cntrs(struct hfi1_devdata *dd)
  10917. {
  10918. int i, rcv_ctxts, j;
  10919. size_t sz;
  10920. char *p;
  10921. char name[C_MAX_NAME];
  10922. struct hfi1_pportdata *ppd;
  10923. const char *bit_type_32 = ",32";
  10924. const int bit_type_32_sz = strlen(bit_type_32);
  10925. u32 sdma_engines = chip_sdma_engines(dd);
  10926. /* set up the stats timer; the add_timer is done at the end */
  10927. timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
  10928. /***********************/
  10929. /* per device counters */
  10930. /***********************/
  10931. /* size names and determine how many we have*/
  10932. dd->ndevcntrs = 0;
  10933. sz = 0;
  10934. for (i = 0; i < DEV_CNTR_LAST; i++) {
  10935. if (dev_cntrs[i].flags & CNTR_DISABLED) {
  10936. hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
  10937. continue;
  10938. }
  10939. if (dev_cntrs[i].flags & CNTR_VL) {
  10940. dev_cntrs[i].offset = dd->ndevcntrs;
  10941. for (j = 0; j < C_VL_COUNT; j++) {
  10942. snprintf(name, C_MAX_NAME, "%s%d",
  10943. dev_cntrs[i].name, vl_from_idx(j));
  10944. sz += strlen(name);
  10945. /* Add ",32" for 32-bit counters */
  10946. if (dev_cntrs[i].flags & CNTR_32BIT)
  10947. sz += bit_type_32_sz;
  10948. sz++;
  10949. dd->ndevcntrs++;
  10950. }
  10951. } else if (dev_cntrs[i].flags & CNTR_SDMA) {
  10952. dev_cntrs[i].offset = dd->ndevcntrs;
  10953. for (j = 0; j < sdma_engines; j++) {
  10954. snprintf(name, C_MAX_NAME, "%s%d",
  10955. dev_cntrs[i].name, j);
  10956. sz += strlen(name);
  10957. /* Add ",32" for 32-bit counters */
  10958. if (dev_cntrs[i].flags & CNTR_32BIT)
  10959. sz += bit_type_32_sz;
  10960. sz++;
  10961. dd->ndevcntrs++;
  10962. }
  10963. } else {
  10964. /* +1 for newline. */
  10965. sz += strlen(dev_cntrs[i].name) + 1;
  10966. /* Add ",32" for 32-bit counters */
  10967. if (dev_cntrs[i].flags & CNTR_32BIT)
  10968. sz += bit_type_32_sz;
  10969. dev_cntrs[i].offset = dd->ndevcntrs;
  10970. dd->ndevcntrs++;
  10971. }
  10972. }
  10973. /* allocate space for the counter values */
  10974. dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
  10975. GFP_KERNEL);
  10976. if (!dd->cntrs)
  10977. goto bail;
  10978. dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
  10979. if (!dd->scntrs)
  10980. goto bail;
  10981. /* allocate space for the counter names */
  10982. dd->cntrnameslen = sz;
  10983. dd->cntrnames = kmalloc(sz, GFP_KERNEL);
  10984. if (!dd->cntrnames)
  10985. goto bail;
  10986. /* fill in the names */
  10987. for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
  10988. if (dev_cntrs[i].flags & CNTR_DISABLED) {
  10989. /* Nothing */
  10990. } else if (dev_cntrs[i].flags & CNTR_VL) {
  10991. for (j = 0; j < C_VL_COUNT; j++) {
  10992. snprintf(name, C_MAX_NAME, "%s%d",
  10993. dev_cntrs[i].name,
  10994. vl_from_idx(j));
  10995. memcpy(p, name, strlen(name));
  10996. p += strlen(name);
  10997. /* Counter is 32 bits */
  10998. if (dev_cntrs[i].flags & CNTR_32BIT) {
  10999. memcpy(p, bit_type_32, bit_type_32_sz);
  11000. p += bit_type_32_sz;
  11001. }
  11002. *p++ = '\n';
  11003. }
  11004. } else if (dev_cntrs[i].flags & CNTR_SDMA) {
  11005. for (j = 0; j < sdma_engines; j++) {
  11006. snprintf(name, C_MAX_NAME, "%s%d",
  11007. dev_cntrs[i].name, j);
  11008. memcpy(p, name, strlen(name));
  11009. p += strlen(name);
  11010. /* Counter is 32 bits */
  11011. if (dev_cntrs[i].flags & CNTR_32BIT) {
  11012. memcpy(p, bit_type_32, bit_type_32_sz);
  11013. p += bit_type_32_sz;
  11014. }
  11015. *p++ = '\n';
  11016. }
  11017. } else {
  11018. memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
  11019. p += strlen(dev_cntrs[i].name);
  11020. /* Counter is 32 bits */
  11021. if (dev_cntrs[i].flags & CNTR_32BIT) {
  11022. memcpy(p, bit_type_32, bit_type_32_sz);
  11023. p += bit_type_32_sz;
  11024. }
  11025. *p++ = '\n';
  11026. }
  11027. }
  11028. /*********************/
  11029. /* per port counters */
  11030. /*********************/
  11031. /*
  11032. * Go through the counters for the overflows and disable the ones we
  11033. * don't need. This varies based on platform so we need to do it
  11034. * dynamically here.
  11035. */
  11036. rcv_ctxts = dd->num_rcv_contexts;
  11037. for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
  11038. i <= C_RCV_HDR_OVF_LAST; i++) {
  11039. port_cntrs[i].flags |= CNTR_DISABLED;
  11040. }
  11041. /* size port counter names and determine how many we have */
  11042. sz = 0;
  11043. dd->nportcntrs = 0;
  11044. for (i = 0; i < PORT_CNTR_LAST; i++) {
  11045. if (port_cntrs[i].flags & CNTR_DISABLED) {
  11046. hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
  11047. continue;
  11048. }
  11049. if (port_cntrs[i].flags & CNTR_VL) {
  11050. port_cntrs[i].offset = dd->nportcntrs;
  11051. for (j = 0; j < C_VL_COUNT; j++) {
  11052. snprintf(name, C_MAX_NAME, "%s%d",
  11053. port_cntrs[i].name, vl_from_idx(j));
  11054. sz += strlen(name);
  11055. /* Add ",32" for 32-bit counters */
  11056. if (port_cntrs[i].flags & CNTR_32BIT)
  11057. sz += bit_type_32_sz;
  11058. sz++;
  11059. dd->nportcntrs++;
  11060. }
  11061. } else {
  11062. /* +1 for newline */
  11063. sz += strlen(port_cntrs[i].name) + 1;
  11064. /* Add ",32" for 32-bit counters */
  11065. if (port_cntrs[i].flags & CNTR_32BIT)
  11066. sz += bit_type_32_sz;
  11067. port_cntrs[i].offset = dd->nportcntrs;
  11068. dd->nportcntrs++;
  11069. }
  11070. }
  11071. /* allocate space for the counter names */
  11072. dd->portcntrnameslen = sz;
  11073. dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
  11074. if (!dd->portcntrnames)
  11075. goto bail;
  11076. /* fill in port cntr names */
  11077. for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
  11078. if (port_cntrs[i].flags & CNTR_DISABLED)
  11079. continue;
  11080. if (port_cntrs[i].flags & CNTR_VL) {
  11081. for (j = 0; j < C_VL_COUNT; j++) {
  11082. snprintf(name, C_MAX_NAME, "%s%d",
  11083. port_cntrs[i].name, vl_from_idx(j));
  11084. memcpy(p, name, strlen(name));
  11085. p += strlen(name);
  11086. /* Counter is 32 bits */
  11087. if (port_cntrs[i].flags & CNTR_32BIT) {
  11088. memcpy(p, bit_type_32, bit_type_32_sz);
  11089. p += bit_type_32_sz;
  11090. }
  11091. *p++ = '\n';
  11092. }
  11093. } else {
  11094. memcpy(p, port_cntrs[i].name,
  11095. strlen(port_cntrs[i].name));
  11096. p += strlen(port_cntrs[i].name);
  11097. /* Counter is 32 bits */
  11098. if (port_cntrs[i].flags & CNTR_32BIT) {
  11099. memcpy(p, bit_type_32, bit_type_32_sz);
  11100. p += bit_type_32_sz;
  11101. }
  11102. *p++ = '\n';
  11103. }
  11104. }
  11105. /* allocate per port storage for counter values */
  11106. ppd = (struct hfi1_pportdata *)(dd + 1);
  11107. for (i = 0; i < dd->num_pports; i++, ppd++) {
  11108. ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
  11109. if (!ppd->cntrs)
  11110. goto bail;
  11111. ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
  11112. if (!ppd->scntrs)
  11113. goto bail;
  11114. }
  11115. /* CPU counters need to be allocated and zeroed */
  11116. if (init_cpu_counters(dd))
  11117. goto bail;
  11118. dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
  11119. WQ_MEM_RECLAIM, dd->unit);
  11120. if (!dd->update_cntr_wq)
  11121. goto bail;
  11122. INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
  11123. mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
  11124. return 0;
  11125. bail:
  11126. free_cntrs(dd);
  11127. return -ENOMEM;
  11128. }
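/*
 * Illustrative sketch, not part of the driver: init_cntrs() above lays the
 * counter names out as one flat buffer of newline-terminated entries, with
 * a ",32" suffix marking 32-bit counters.  A consumer could walk that
 * buffer as shown; the helper name is an assumption for the example.
 */
static void example_walk_cntr_names(const char *buf, size_t len)
{
	const char *p = buf;
	const char *end = buf + len;

	while (p < end) {
		const char *nl = memchr(p, '\n', end - p);

		if (!nl)
			break;
		/* one entry is [p, nl); a trailing ",32" flags a 32-bit counter */
		p = nl + 1;
	}
}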
  11129. static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
  11130. {
  11131. switch (chip_lstate) {
  11132. default:
  11133. dd_dev_err(dd,
  11134. "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
  11135. chip_lstate);
  11136. /* fall through */
  11137. case LSTATE_DOWN:
  11138. return IB_PORT_DOWN;
  11139. case LSTATE_INIT:
  11140. return IB_PORT_INIT;
  11141. case LSTATE_ARMED:
  11142. return IB_PORT_ARMED;
  11143. case LSTATE_ACTIVE:
  11144. return IB_PORT_ACTIVE;
  11145. }
  11146. }
  11147. u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
  11148. {
  11149. /* look at the HFI meta-states only */
  11150. switch (chip_pstate & 0xf0) {
  11151. default:
  11152. dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
  11153. chip_pstate);
  11154. /* fall through */
  11155. case PLS_DISABLED:
  11156. return IB_PORTPHYSSTATE_DISABLED;
  11157. case PLS_OFFLINE:
  11158. return OPA_PORTPHYSSTATE_OFFLINE;
  11159. case PLS_POLLING:
  11160. return IB_PORTPHYSSTATE_POLLING;
  11161. case PLS_CONFIGPHY:
  11162. return IB_PORTPHYSSTATE_TRAINING;
  11163. case PLS_LINKUP:
  11164. return IB_PORTPHYSSTATE_LINKUP;
  11165. case PLS_PHYTEST:
  11166. return IB_PORTPHYSSTATE_PHY_TEST;
  11167. }
  11168. }
  11169. /* return the OPA port logical state name */
  11170. const char *opa_lstate_name(u32 lstate)
  11171. {
  11172. static const char * const port_logical_names[] = {
  11173. "PORT_NOP",
  11174. "PORT_DOWN",
  11175. "PORT_INIT",
  11176. "PORT_ARMED",
  11177. "PORT_ACTIVE",
  11178. "PORT_ACTIVE_DEFER",
  11179. };
  11180. if (lstate < ARRAY_SIZE(port_logical_names))
  11181. return port_logical_names[lstate];
  11182. return "unknown";
  11183. }
  11184. /* return the OPA port physical state name */
  11185. const char *opa_pstate_name(u32 pstate)
  11186. {
  11187. static const char * const port_physical_names[] = {
  11188. "PHYS_NOP",
  11189. "reserved1",
  11190. "PHYS_POLL",
  11191. "PHYS_DISABLED",
  11192. "PHYS_TRAINING",
  11193. "PHYS_LINKUP",
  11194. "PHYS_LINK_ERR_RECOVER",
  11195. "PHYS_PHY_TEST",
  11196. "reserved8",
  11197. "PHYS_OFFLINE",
  11198. "PHYS_GANGED",
  11199. "PHYS_TEST",
  11200. };
  11201. if (pstate < ARRAY_SIZE(port_physical_names))
  11202. return port_physical_names[pstate];
  11203. return "unknown";
  11204. }
  11205. /**
  11206. * update_statusp - Update userspace status flag
  11207. * @ppd: Port data structure
  11208. * @state: port state information
  11209. *
  11210. * Actual port status is determined by the host_link_state value
  11211. * in the ppd.
  11212. *
  11213. * host_link_state MUST be updated before updating the user space
  11214. * statusp.
  11215. */
  11216. static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
  11217. {
  11218. /*
  11219. * Set port status flags in the page mapped into userspace
  11220. * memory. Do it here to ensure a reliable state - this is
  11221. * the only function called by all state handling code.
  11222. * Always set the flags because the cached value
  11223. * might have been changed explicitly outside of this
  11224. * function.
  11225. */
  11226. if (ppd->statusp) {
  11227. switch (state) {
  11228. case IB_PORT_DOWN:
  11229. case IB_PORT_INIT:
  11230. *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
  11231. HFI1_STATUS_IB_READY);
  11232. break;
  11233. case IB_PORT_ARMED:
  11234. *ppd->statusp |= HFI1_STATUS_IB_CONF;
  11235. break;
  11236. case IB_PORT_ACTIVE:
  11237. *ppd->statusp |= HFI1_STATUS_IB_READY;
  11238. break;
  11239. }
  11240. }
  11241. dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
  11242. opa_lstate_name(state), state);
  11243. }
  11244. /**
  11245. * wait_logical_linkstate - wait for an IB link state change to occur
  11246. * @ppd: port device
  11247. * @state: the state to wait for
  11248. * @msecs: the number of milliseconds to wait
  11249. *
  11250. * Wait up to msecs milliseconds for IB link state change to occur.
  11251. * For now, take the easy polling route.
  11252. * Returns 0 if state reached, otherwise -ETIMEDOUT.
  11253. */
  11254. static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
  11255. int msecs)
  11256. {
  11257. unsigned long timeout;
  11258. u32 new_state;
  11259. timeout = jiffies + msecs_to_jiffies(msecs);
  11260. while (1) {
  11261. new_state = chip_to_opa_lstate(ppd->dd,
  11262. read_logical_state(ppd->dd));
  11263. if (new_state == state)
  11264. break;
  11265. if (time_after(jiffies, timeout)) {
  11266. dd_dev_err(ppd->dd,
  11267. "timeout waiting for link state 0x%x\n",
  11268. state);
  11269. return -ETIMEDOUT;
  11270. }
  11271. msleep(20);
  11272. }
  11273. return 0;
  11274. }
  11275. static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
  11276. {
  11277. u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
  11278. dd_dev_info(ppd->dd,
  11279. "physical state changed to %s (0x%x), phy 0x%x\n",
  11280. opa_pstate_name(ib_pstate), ib_pstate, state);
  11281. }
  11282. /*
  11283. * Read the physical hardware link state and check if it matches the
  11284. * host driver's anticipated state.
  11285. */
  11286. static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
  11287. {
  11288. u32 read_state = read_physical_state(ppd->dd);
  11289. if (read_state == state) {
  11290. log_state_transition(ppd, state);
  11291. } else {
  11292. dd_dev_err(ppd->dd,
  11293. "anticipated phy link state 0x%x, read 0x%x\n",
  11294. state, read_state);
  11295. }
  11296. }
  11297. /*
  11298. * wait_physical_linkstate - wait for a physical link state change to occur
  11299. * @ppd: port device
  11300. * @state: the state to wait for
  11301. * @msecs: the number of milliseconds to wait
  11302. *
  11303. * Wait up to msecs milliseconds for physical link state change to occur.
  11304. * Returns 0 if state reached, otherwise -ETIMEDOUT.
  11305. */
  11306. static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
  11307. int msecs)
  11308. {
  11309. u32 read_state;
  11310. unsigned long timeout;
  11311. timeout = jiffies + msecs_to_jiffies(msecs);
  11312. while (1) {
  11313. read_state = read_physical_state(ppd->dd);
  11314. if (read_state == state)
  11315. break;
  11316. if (time_after(jiffies, timeout)) {
  11317. dd_dev_err(ppd->dd,
  11318. "timeout waiting for phy link state 0x%x\n",
  11319. state);
  11320. return -ETIMEDOUT;
  11321. }
  11322. usleep_range(1950, 2050); /* sleep 2ms-ish */
  11323. }
  11324. log_state_transition(ppd, state);
  11325. return 0;
  11326. }
  11327. /*
  11328. * wait_phys_link_offline_substates - wait for any offline substate
  11329. * @ppd: port device
  11330. * @msecs: the number of milliseconds to wait
  11331. *
  11332. * Wait up to msecs milliseconds for any offline physical link
  11333. * state change to occur.
  11334. * Returns the read physical state once an offline substate is reached,
  11335. * otherwise -ETIMEDOUT.
  11335. */
  11336. static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
  11337. int msecs)
  11338. {
  11339. u32 read_state;
  11340. unsigned long timeout;
  11341. timeout = jiffies + msecs_to_jiffies(msecs);
  11342. while (1) {
  11343. read_state = read_physical_state(ppd->dd);
  11344. if ((read_state & 0xF0) == PLS_OFFLINE)
  11345. break;
  11346. if (time_after(jiffies, timeout)) {
  11347. dd_dev_err(ppd->dd,
  11348. "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
  11349. read_state, msecs);
  11350. return -ETIMEDOUT;
  11351. }
  11352. usleep_range(1950, 2050); /* sleep 2ms-ish */
  11353. }
  11354. log_state_transition(ppd, read_state);
  11355. return read_state;
  11356. }
  11357. /*
  11358. * wait_phys_link_out_of_offline - wait for any out of offline state
  11359. * @ppd: port device
  11360. * @msecs: the number of milliseconds to wait
  11361. *
  11362. * Wait up to msecs milliseconds for any out of offline physical link
  11363. * state change to occur.
  11364. * Returns the read physical state once the link has left offline,
  11365. * otherwise -ETIMEDOUT.
  11365. */
  11366. static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
  11367. int msecs)
  11368. {
  11369. u32 read_state;
  11370. unsigned long timeout;
  11371. timeout = jiffies + msecs_to_jiffies(msecs);
  11372. while (1) {
  11373. read_state = read_physical_state(ppd->dd);
  11374. if ((read_state & 0xF0) != PLS_OFFLINE)
  11375. break;
  11376. if (time_after(jiffies, timeout)) {
  11377. dd_dev_err(ppd->dd,
  11378. "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
  11379. read_state, msecs);
  11380. return -ETIMEDOUT;
  11381. }
  11382. usleep_range(1950, 2050); /* sleep 2ms-ish */
  11383. }
  11384. log_state_transition(ppd, read_state);
  11385. return read_state;
  11386. }
  11387. #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
  11388. (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
  11389. #define SET_STATIC_RATE_CONTROL_SMASK(r) \
  11390. (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
  11391. void hfi1_init_ctxt(struct send_context *sc)
  11392. {
  11393. if (sc) {
  11394. struct hfi1_devdata *dd = sc->dd;
  11395. u64 reg;
  11396. u8 set = (sc->type == SC_USER ?
  11397. HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
  11398. HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
  11399. reg = read_kctxt_csr(dd, sc->hw_context,
  11400. SEND_CTXT_CHECK_ENABLE);
  11401. if (set)
  11402. CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
  11403. else
  11404. SET_STATIC_RATE_CONTROL_SMASK(reg);
  11405. write_kctxt_csr(dd, sc->hw_context,
  11406. SEND_CTXT_CHECK_ENABLE, reg);
  11407. }
  11408. }
  11409. int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
  11410. {
  11411. int ret = 0;
  11412. u64 reg;
  11413. if (dd->icode != ICODE_RTL_SILICON) {
  11414. if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
  11415. dd_dev_info(dd, "%s: tempsense not supported by HW\n",
  11416. __func__);
  11417. return -EINVAL;
  11418. }
  11419. reg = read_csr(dd, ASIC_STS_THERM);
  11420. temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
  11421. ASIC_STS_THERM_CURR_TEMP_MASK);
  11422. temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
  11423. ASIC_STS_THERM_LO_TEMP_MASK);
  11424. temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
  11425. ASIC_STS_THERM_HI_TEMP_MASK);
  11426. temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
  11427. ASIC_STS_THERM_CRIT_TEMP_MASK);
  11428. /* triggers is a 3-bit value - 1 bit per trigger. */
  11429. temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
  11430. return ret;
  11431. }
  11432. /**
  11433. * get_int_mask - get 64 bit int mask
  11434. * @dd - the devdata
  11435. * @i - the csr (relative to CCE_INT_MASK)
  11436. *
  11437. * Returns the mask with the urgent interrupt mask
  11438. * bit clear for kernel receive contexts.
  11439. */
  11440. static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
  11441. {
  11442. u64 mask = U64_MAX; /* default to no change */
  11443. if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
  11444. int j = (i - (IS_RCVURGENT_START / 64)) * 64;
  11445. int k = !j ? IS_RCVURGENT_START % 64 : 0;
  11446. if (j)
  11447. j -= IS_RCVURGENT_START % 64;
  11448. /* j = 0..dd->first_dyn_alloc_ctxt - 1,k = 0..63 */
  11449. for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
  11450. /* convert to bit in mask and clear */
  11451. mask &= ~BIT_ULL(k);
  11452. }
  11453. return mask;
  11454. }
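/*
 * Illustrative sketch, not part of the driver: get_int_mask() above clears
 * one urgent-interrupt bit per kernel receive context and leaves every
 * other bit set.  The helper below shows the same bit manipulation in
 * isolation; its name and parameters are assumptions for the example.
 */
static u64 example_clear_urgent_bits(u64 mask, unsigned int start_bit,
				     unsigned int nr_kernel_ctxts)
{
	unsigned int k;

	for (k = 0; k < nr_kernel_ctxts && start_bit + k < 64; k++)
		mask &= ~BIT_ULL(start_bit + k);
	return mask;
}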
  11455. /* ========================================================================= */
  11456. /*
  11457. * Enable/disable chip from delivering interrupts.
  11458. */
  11459. void set_intr_state(struct hfi1_devdata *dd, u32 enable)
  11460. {
  11461. int i;
  11462. /*
  11463. * In HFI, the mask needs to be 1 to allow interrupts.
  11464. */
  11465. if (enable) {
  11466. /* enable all interrupts but urgent on kernel contexts */
  11467. for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
  11468. u64 mask = get_int_mask(dd, i);
  11469. write_csr(dd, CCE_INT_MASK + (8 * i), mask);
  11470. }
  11471. init_qsfp_int(dd);
  11472. } else {
  11473. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  11474. write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
  11475. }
  11476. }
  11477. /*
  11478. * Clear all interrupt sources on the chip.
  11479. */
  11480. static void clear_all_interrupts(struct hfi1_devdata *dd)
  11481. {
  11482. int i;
  11483. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  11484. write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
  11485. write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
  11486. write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
  11487. write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
  11488. write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
  11489. write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
  11490. write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
  11491. write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
  11492. for (i = 0; i < chip_send_contexts(dd); i++)
  11493. write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
  11494. for (i = 0; i < chip_sdma_engines(dd); i++)
  11495. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
  11496. write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
  11497. write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
  11498. write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
  11499. }
  11500. /**
  11501. * hfi1_clean_up_interrupts() - Free all IRQ resources
  11502. * @dd: valid device data structure
  11503. *
  11504. * Free the MSI-X and associated PCI resources, if they have been allocated.
  11505. */
  11506. void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
  11507. {
  11508. int i;
  11509. struct hfi1_msix_entry *me = dd->msix_entries;
  11510. /* remove irqs - must happen before disabling/turning off */
  11511. for (i = 0; i < dd->num_msix_entries; i++, me++) {
  11512. if (!me->arg) /* => no irq, no affinity */
  11513. continue;
  11514. hfi1_put_irq_affinity(dd, me);
  11515. pci_free_irq(dd->pcidev, i, me->arg);
  11516. }
  11517. /* clean structures */
  11518. kfree(dd->msix_entries);
  11519. dd->msix_entries = NULL;
  11520. dd->num_msix_entries = 0;
  11521. pci_free_irq_vectors(dd->pcidev);
  11522. }
  11523. /*
  11524. * Remap the interrupt source from the general handler to the given MSI-X
  11525. * interrupt.
  11526. */
  11527. static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
  11528. {
  11529. u64 reg;
  11530. int m, n;
  11531. /* clear from the handled mask of the general interrupt */
  11532. m = isrc / 64;
  11533. n = isrc % 64;
  11534. if (likely(m < CCE_NUM_INT_CSRS)) {
  11535. dd->gi_mask[m] &= ~((u64)1 << n);
  11536. } else {
  11537. dd_dev_err(dd, "remap interrupt err\n");
  11538. return;
  11539. }
  11540. /* direct the chip source to the given MSI-X interrupt */
  11541. m = isrc / 8;
  11542. n = isrc % 8;
  11543. reg = read_csr(dd, CCE_INT_MAP + (8 * m));
  11544. reg &= ~((u64)0xff << (8 * n));
  11545. reg |= ((u64)msix_intr & 0xff) << (8 * n);
  11546. write_csr(dd, CCE_INT_MAP + (8 * m), reg);
  11547. }
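/*
 * Illustrative sketch, not part of the driver: each CceIntMap CSR holds
 * eight 8-bit MSI-X vector numbers, one per interrupt source, which is why
 * remap_intr() above uses isrc / 8 and isrc % 8.  A hedged helper showing
 * the same address arithmetic; the name is an assumption for the example.
 */
static void example_int_map_slot(int isrc, u32 *csr_offset, int *byte_lane)
{
	*csr_offset = CCE_INT_MAP + 8 * (isrc / 8); /* one 64-bit CSR per 8 sources */
	*byte_lane = isrc % 8;                      /* 8-bit field within that CSR */
}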
  11548. static void remap_sdma_interrupts(struct hfi1_devdata *dd,
  11549. int engine, int msix_intr)
  11550. {
  11551. /*
  11552. * SDMA engine interrupt sources are grouped by type, rather than
  11553. * by engine. Per-engine interrupts are as follows:
  11554. * SDMA
  11555. * SDMAProgress
  11556. * SDMAIdle
  11557. */
  11558. remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
  11559. msix_intr);
  11560. remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
  11561. msix_intr);
  11562. remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
  11563. msix_intr);
  11564. }
  11565. static int request_msix_irqs(struct hfi1_devdata *dd)
  11566. {
  11567. int first_general, last_general;
  11568. int first_sdma, last_sdma;
  11569. int first_rx, last_rx;
  11570. int i, ret = 0;
  11571. /* calculate the ranges we are going to use */
  11572. first_general = 0;
  11573. last_general = first_general + 1;
  11574. first_sdma = last_general;
  11575. last_sdma = first_sdma + dd->num_sdma;
  11576. first_rx = last_sdma;
  11577. last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
  11578. /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
  11579. dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
  11580. /*
  11581. * Sanity check - the code expects all SDMA chip source
  11582. * interrupts to be in the same CSR, starting at bit 0. Verify
  11583. * that this is true by checking the bit location of the start.
  11584. */
  11585. BUILD_BUG_ON(IS_SDMA_START % 64);
  11586. for (i = 0; i < dd->num_msix_entries; i++) {
  11587. struct hfi1_msix_entry *me = &dd->msix_entries[i];
  11588. const char *err_info;
  11589. irq_handler_t handler;
  11590. irq_handler_t thread = NULL;
  11591. void *arg = NULL;
  11592. int idx;
  11593. struct hfi1_ctxtdata *rcd = NULL;
  11594. struct sdma_engine *sde = NULL;
  11595. char name[MAX_NAME_SIZE];
  11596. /* obtain the arguments to pci_request_irq */
  11597. if (first_general <= i && i < last_general) {
  11598. idx = i - first_general;
  11599. handler = general_interrupt;
  11600. arg = dd;
  11601. snprintf(name, sizeof(name),
  11602. DRIVER_NAME "_%d", dd->unit);
  11603. err_info = "general";
  11604. me->type = IRQ_GENERAL;
  11605. } else if (first_sdma <= i && i < last_sdma) {
  11606. idx = i - first_sdma;
  11607. sde = &dd->per_sdma[idx];
  11608. handler = sdma_interrupt;
  11609. arg = sde;
  11610. snprintf(name, sizeof(name),
  11611. DRIVER_NAME "_%d sdma%d", dd->unit, idx);
  11612. err_info = "sdma";
  11613. remap_sdma_interrupts(dd, idx, i);
  11614. me->type = IRQ_SDMA;
  11615. } else if (first_rx <= i && i < last_rx) {
  11616. idx = i - first_rx;
  11617. rcd = hfi1_rcd_get_by_index_safe(dd, idx);
  11618. if (rcd) {
  11619. /*
  11620. * Set the interrupt register and mask for this
  11621. * context's interrupt.
  11622. */
  11623. rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
  11624. rcd->imask = ((u64)1) <<
  11625. ((IS_RCVAVAIL_START + idx) % 64);
  11626. handler = receive_context_interrupt;
  11627. thread = receive_context_thread;
  11628. arg = rcd;
  11629. snprintf(name, sizeof(name),
  11630. DRIVER_NAME "_%d kctxt%d",
  11631. dd->unit, idx);
  11632. err_info = "receive context";
  11633. remap_intr(dd, IS_RCVAVAIL_START + idx, i);
  11634. me->type = IRQ_RCVCTXT;
  11635. rcd->msix_intr = i;
  11636. hfi1_rcd_put(rcd);
  11637. }
  11638. } else {
  11639. /* not in our expected range - complain, then
  11640. * ignore it
  11641. */
  11642. dd_dev_err(dd,
  11643. "Unexpected extra MSI-X interrupt %d\n", i);
  11644. continue;
  11645. }
  11646. /* no argument, no interrupt */
  11647. if (!arg)
  11648. continue;
  11649. /* make sure the name is terminated */
  11650. name[sizeof(name) - 1] = 0;
  11651. me->irq = pci_irq_vector(dd->pcidev, i);
  11652. ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
  11653. name);
  11654. if (ret) {
  11655. dd_dev_err(dd,
  11656. "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
  11657. err_info, me->irq, idx, ret);
  11658. return ret;
  11659. }
  11660. /*
  11661. * assign arg after pci_request_irq call, so it will be
  11662. * cleaned up
  11663. */
  11664. me->arg = arg;
  11665. ret = hfi1_get_irq_affinity(dd, me);
  11666. if (ret)
  11667. dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
  11668. }
  11669. return ret;
  11670. }
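/*
 * Illustrative sketch, not part of the driver: request_msix_irqs() above
 * carves the MSI-X vector space into consecutive ranges.  With, say, 16
 * SDMA engines and 8 kernel receive contexts the layout would be:
 *
 *   vector 0       - general "slow path" interrupt
 *   vectors 1..16  - one per SDMA engine
 *   vectors 17..24 - one per kernel receive context
 *   vectors 25..   - VNIC contexts, mapped when they are created
 *
 * The engine and context counts here are assumptions chosen only for the
 * example, as is the helper name below.
 */
static int example_first_rcv_vector(int num_sdma)
{
	return 1 /* general */ + num_sdma; /* receive contexts follow SDMA */
}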
  11671. void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
  11672. {
  11673. int i;
  11674. for (i = 0; i < dd->vnic.num_ctxt; i++) {
  11675. struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
  11676. struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
  11677. synchronize_irq(me->irq);
  11678. }
  11679. }
  11680. void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
  11681. {
  11682. struct hfi1_devdata *dd = rcd->dd;
  11683. struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
  11684. if (!me->arg) /* => no irq, no affinity */
  11685. return;
  11686. hfi1_put_irq_affinity(dd, me);
  11687. pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
  11688. me->arg = NULL;
  11689. }
  11690. void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
  11691. {
  11692. struct hfi1_devdata *dd = rcd->dd;
  11693. struct hfi1_msix_entry *me;
  11694. int idx = rcd->ctxt;
  11695. void *arg = rcd;
  11696. int ret;
  11697. rcd->msix_intr = dd->vnic.msix_idx++;
  11698. me = &dd->msix_entries[rcd->msix_intr];
  11699. /*
  11700. * Set the interrupt register and mask for this
  11701. * context's interrupt.
  11702. */
  11703. rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
  11704. rcd->imask = ((u64)1) <<
  11705. ((IS_RCVAVAIL_START + idx) % 64);
  11706. me->type = IRQ_RCVCTXT;
  11707. me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
  11708. remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
  11709. ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
  11710. receive_context_interrupt,
  11711. receive_context_thread, arg,
  11712. DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
  11713. if (ret) {
  11714. dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
  11715. me->irq, idx, ret);
  11716. return;
  11717. }
  11718. /*
  11719. * assign arg after pci_request_irq call, so it will be
  11720. * cleaned up
  11721. */
  11722. me->arg = arg;
  11723. ret = hfi1_get_irq_affinity(dd, me);
  11724. if (ret) {
  11725. dd_dev_err(dd,
  11726. "unable to pin IRQ %d\n", ret);
  11727. pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
  11728. }
  11729. }
  11730. /*
  11731. * Set the general handler to accept all interrupts, remap all
  11732. * chip interrupts back to MSI-X 0.
  11733. */
  11734. static void reset_interrupts(struct hfi1_devdata *dd)
  11735. {
  11736. int i;
  11737. /* all interrupts handled by the general handler */
  11738. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  11739. dd->gi_mask[i] = ~(u64)0;
  11740. /* all chip interrupts map to MSI-X 0 */
  11741. for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
  11742. write_csr(dd, CCE_INT_MAP + (8 * i), 0);
  11743. }
  11744. static int set_up_interrupts(struct hfi1_devdata *dd)
  11745. {
  11746. u32 total;
  11747. int ret, request;
  11748. /*
  11749. * Interrupt count:
  11750. * 1 general, "slow path" interrupt (includes the SDMA engines
  11751. * slow source, SDMACleanupDone)
  11752. * N interrupts - one per used SDMA engine
  11753. * M interrupts - one per kernel receive context
  11754. * V interrupts - one per VNIC context
  11755. */
  11756. total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
  11757. /* ask for MSI-X interrupts */
  11758. request = request_msix(dd, total);
  11759. if (request < 0) {
  11760. ret = request;
  11761. goto fail;
  11762. } else {
  11763. dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
  11764. GFP_KERNEL);
  11765. if (!dd->msix_entries) {
  11766. ret = -ENOMEM;
  11767. goto fail;
  11768. }
  11769. /* using MSI-X */
  11770. dd->num_msix_entries = total;
  11771. dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
  11772. }
  11773. /* mask all interrupts */
  11774. set_intr_state(dd, 0);
  11775. /* clear all pending interrupts */
  11776. clear_all_interrupts(dd);
  11777. /* reset general handler mask, chip MSI-X mappings */
  11778. reset_interrupts(dd);
  11779. ret = request_msix_irqs(dd);
  11780. if (ret)
  11781. goto fail;
  11782. return 0;
  11783. fail:
  11784. hfi1_clean_up_interrupts(dd);
  11785. return ret;
  11786. }
  11787. /*
  11788. * Set up context values in dd. Sets:
  11789. *
  11790. * num_rcv_contexts - number of contexts being used
  11791. * n_krcv_queues - number of kernel contexts
  11792. * first_dyn_alloc_ctxt - first dynamically allocated context
  11793. * in array of contexts
  11794. * freectxts - number of free user contexts
  11795. * num_send_contexts - number of PIO send contexts being used
  11796. * num_vnic_contexts - number of contexts reserved for VNIC
  11797. */
  11798. static int set_up_context_variables(struct hfi1_devdata *dd)
  11799. {
  11800. unsigned long num_kernel_contexts;
  11801. u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
  11802. int total_contexts;
  11803. int ret;
  11804. unsigned ngroups;
  11805. int rmt_count;
  11806. int user_rmt_reduced;
  11807. u32 n_usr_ctxts;
  11808. u32 send_contexts = chip_send_contexts(dd);
  11809. u32 rcv_contexts = chip_rcv_contexts(dd);
  11810. /*
  11811. * Kernel receive contexts:
  11812. * - Context 0 - control context (VL15/multicast/error)
  11813. * - Context 1 - first kernel context
  11814. * - Context 2 - second kernel context
  11815. * ...
  11816. */
  11817. if (n_krcvqs)
  11818. /*
  11819. * n_krcvqs is the sum of module parameter kernel receive
  11820. * contexts, krcvqs[]. It does not include the control
  11821. * context, so add that.
  11822. */
  11823. num_kernel_contexts = n_krcvqs + 1;
  11824. else
  11825. num_kernel_contexts = DEFAULT_KRCVQS + 1;
  11826. /*
  11827. * Every kernel receive context needs an ACK send context.
  11828. * One send context is allocated for each VL{0-7} and VL15.
  11829. */
  11830. if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
  11831. dd_dev_err(dd,
  11832. "Reducing # kernel rcv contexts to: %d, from %lu\n",
  11833. send_contexts - num_vls - 1,
  11834. num_kernel_contexts);
  11835. num_kernel_contexts = send_contexts - num_vls - 1;
  11836. }
  11837. /* Accommodate VNIC contexts if possible */
  11838. if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
  11839. dd_dev_err(dd, "No receive contexts available for VNIC\n");
  11840. num_vnic_contexts = 0;
  11841. }
  11842. total_contexts = num_kernel_contexts + num_vnic_contexts;
  11843. /*
  11844. * User contexts:
  11845. * - default to 1 user context per real (non-HT) CPU core if
  11846. * num_user_contexts is negative
  11847. */
  11848. if (num_user_contexts < 0)
  11849. n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
  11850. else
  11851. n_usr_ctxts = num_user_contexts;
  11852. /*
  11853. * Adjust the counts given a global max.
  11854. */
  11855. if (total_contexts + n_usr_ctxts > rcv_contexts) {
  11856. dd_dev_err(dd,
  11857. "Reducing # user receive contexts to: %d, from %u\n",
  11858. rcv_contexts - total_contexts,
  11859. n_usr_ctxts);
  11860. /* recalculate */
  11861. n_usr_ctxts = rcv_contexts - total_contexts;
  11862. }
  11863. /*
  11864. * The RMT entries are currently allocated as shown below:
  11865. * 1. QOS (0 to 128 entries);
  11866. * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
  11867. * 3. VNIC (num_vnic_contexts).
  11868. * Note that PSM FECN oversubscribes num_vnic_contexts
  11869. * entries of the RMT because both VNIC and PSM could allocate any receive
  11870. * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
  11871. * and PSM FECN must reserve an RMT entry for each possible PSM receive
  11872. * context.
  11873. */
  11874. rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
  11875. if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
  11876. user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
  11877. dd_dev_err(dd,
  11878. "RMT size is reducing the number of user receive contexts from %u to %d\n",
  11879. n_usr_ctxts,
  11880. user_rmt_reduced);
  11881. /* recalculate */
  11882. n_usr_ctxts = user_rmt_reduced;
  11883. }
  11884. total_contexts += n_usr_ctxts;
  11885. /* the first N are kernel contexts, the rest are user/vnic contexts */
  11886. dd->num_rcv_contexts = total_contexts;
  11887. dd->n_krcv_queues = num_kernel_contexts;
  11888. dd->first_dyn_alloc_ctxt = num_kernel_contexts;
  11889. dd->num_vnic_contexts = num_vnic_contexts;
  11890. dd->num_user_contexts = n_usr_ctxts;
  11891. dd->freectxts = n_usr_ctxts;
  11892. dd_dev_info(dd,
  11893. "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
  11894. rcv_contexts,
  11895. (int)dd->num_rcv_contexts,
  11896. (int)dd->n_krcv_queues,
  11897. dd->num_vnic_contexts,
  11898. dd->num_user_contexts);
  11899. /*
  11900. * Receive array allocation:
  11901. * All RcvArray entries are divided into groups of 8. This
  11902. * is required by the hardware and will speed up writes to
  11903. * consecutive entries by using write-combining of the entire
  11904. * cacheline.
  11905. *
  11906. * The groups are divided evenly among all contexts;
  11907. * any leftover groups are given to the first N user
  11908. * contexts.
  11909. */
  11910. dd->rcv_entries.group_size = RCV_INCREMENT;
  11911. ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
  11912. dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
  11913. dd->rcv_entries.nctxt_extra = ngroups -
  11914. (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
  11915. dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
  11916. dd->rcv_entries.ngroups,
  11917. dd->rcv_entries.nctxt_extra);
  11918. if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
  11919. MAX_EAGER_ENTRIES * 2) {
  11920. dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
  11921. dd->rcv_entries.group_size;
  11922. dd_dev_info(dd,
  11923. "RcvArray group count too high, change to %u\n",
  11924. dd->rcv_entries.ngroups);
  11925. dd->rcv_entries.nctxt_extra = 0;
  11926. }
  11927. /*
  11928. * PIO send contexts
  11929. */
  11930. ret = init_sc_pools_and_sizes(dd);
  11931. if (ret >= 0) { /* success */
  11932. dd->num_send_contexts = ret;
  11933. dd_dev_info(
  11934. dd,
  11935. "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
  11936. send_contexts,
  11937. dd->num_send_contexts,
  11938. dd->sc_sizes[SC_KERNEL].count,
  11939. dd->sc_sizes[SC_ACK].count,
  11940. dd->sc_sizes[SC_USER].count,
  11941. dd->sc_sizes[SC_VL15].count);
  11942. ret = 0; /* success */
  11943. }
  11944. return ret;
  11945. }
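/*
 * Illustrative sketch, not part of the driver: a worked example of the
 * RcvArray group split computed above.  The entry and context counts are
 * assumptions chosen only to show the arithmetic:
 *
 *   2048 RcvArray entries / group_size 8   = 256 groups
 *   256 groups / 20 receive contexts       = 12 groups per context
 *   nctxt_extra = 256 - (20 * 12)          = 16 leftover groups, given to
 *                                            the first user contexts
 */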
  11946. /*
  11947. * Set the device/port partition key table. The MAD code
  11948. * will ensure that, at least, the partial management
  11949. * partition key is present in the table.
  11950. */
  11951. static void set_partition_keys(struct hfi1_pportdata *ppd)
  11952. {
  11953. struct hfi1_devdata *dd = ppd->dd;
  11954. u64 reg = 0;
  11955. int i;
  11956. dd_dev_info(dd, "Setting partition keys\n");
  11957. for (i = 0; i < hfi1_get_npkeys(dd); i++) {
  11958. reg |= (ppd->pkeys[i] &
  11959. RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
  11960. ((i % 4) *
  11961. RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
  11962. /* Each register holds 4 PKey values. */
  11963. if ((i % 4) == 3) {
  11964. write_csr(dd, RCV_PARTITION_KEY +
  11965. ((i - 3) * 2), reg);
  11966. reg = 0;
  11967. }
  11968. }
  11969. /* Always enable HW pkeys check when pkeys table is set */
  11970. add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
  11971. }
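/*
 * Illustrative sketch, not part of the driver: set_partition_keys() above
 * packs four PKey values into each 64-bit RcvPartitionKey register, lowest
 * index in the lowest bits.  Packing one register could be written as the
 * helper below; the helper name and the fixed 16-bit field spacing are
 * assumptions for the example.
 */
static u64 example_pack_four_pkeys(const u16 *pkeys)
{
	u64 reg = 0;
	int i;

	for (i = 0; i < 4; i++)
		reg |= (u64)pkeys[i] << (16 * i);
	return reg;
}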
  11972. /*
  11973. * These CSRs and memories are uninitialized on reset and must be
  11974. * written before reading to set the ECC/parity bits.
  11975. *
  11976. * NOTE: All user context CSRs that are not mmaped write-only
  11977. * (e.g. the TID flows) must be initialized even if the driver never
  11978. * reads them.
  11979. */
  11980. static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
  11981. {
  11982. int i, j;
  11983. /* CceIntMap */
  11984. for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
  11985. write_csr(dd, CCE_INT_MAP + (8 * i), 0);
  11986. /* SendCtxtCreditReturnAddr */
  11987. for (i = 0; i < chip_send_contexts(dd); i++)
  11988. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
  11989. /* PIO Send buffers */
  11990. /* SDMA Send buffers */
  11991. /*
  11992. * These are not normally read, and (presently) have no method
  11993. * to be read, so are not pre-initialized
  11994. */
  11995. /* RcvHdrAddr */
  11996. /* RcvHdrTailAddr */
  11997. /* RcvTidFlowTable */
  11998. for (i = 0; i < chip_rcv_contexts(dd); i++) {
  11999. write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
  12000. write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
  12001. for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
  12002. write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
  12003. }
  12004. /* RcvArray */
  12005. for (i = 0; i < chip_rcv_array_count(dd); i++)
  12006. hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
  12007. /* RcvQPMapTable */
  12008. for (i = 0; i < 32; i++)
  12009. write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
  12010. }
  12011. /*
  12012. * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
  12013. */
  12014. static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
  12015. u64 ctrl_bits)
  12016. {
  12017. unsigned long timeout;
  12018. u64 reg;
  12019. /* is the condition present? */
  12020. reg = read_csr(dd, CCE_STATUS);
  12021. if ((reg & status_bits) == 0)
  12022. return;
  12023. /* clear the condition */
  12024. write_csr(dd, CCE_CTRL, ctrl_bits);
  12025. /* wait for the condition to clear */
  12026. timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
  12027. while (1) {
  12028. reg = read_csr(dd, CCE_STATUS);
  12029. if ((reg & status_bits) == 0)
  12030. return;
  12031. if (time_after(jiffies, timeout)) {
  12032. dd_dev_err(dd,
  12033. "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
  12034. status_bits, reg & status_bits);
  12035. return;
  12036. }
  12037. udelay(1);
  12038. }
  12039. }
  12040. /* set CCE CSRs to chip reset defaults */
  12041. static void reset_cce_csrs(struct hfi1_devdata *dd)
  12042. {
  12043. int i;
  12044. /* CCE_REVISION read-only */
  12045. /* CCE_REVISION2 read-only */
  12046. /* CCE_CTRL - bits clear automatically */
  12047. /* CCE_STATUS read-only, use CceCtrl to clear */
  12048. clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
  12049. clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
  12050. clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
  12051. for (i = 0; i < CCE_NUM_SCRATCH; i++)
  12052. write_csr(dd, CCE_SCRATCH + (8 * i), 0);
  12053. /* CCE_ERR_STATUS read-only */
  12054. write_csr(dd, CCE_ERR_MASK, 0);
  12055. write_csr(dd, CCE_ERR_CLEAR, ~0ull);
  12056. /* CCE_ERR_FORCE leave alone */
  12057. for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
  12058. write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
  12059. write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
  12060. /* CCE_PCIE_CTRL leave alone */
  12061. for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
  12062. write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
  12063. write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
  12064. CCE_MSIX_TABLE_UPPER_RESETCSR);
  12065. }
  12066. for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
  12067. /* CCE_MSIX_PBA read-only */
  12068. write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
  12069. write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
  12070. }
  12071. for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
  12072. write_csr(dd, CCE_INT_MAP, 0);
  12073. for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
  12074. /* CCE_INT_STATUS read-only */
  12075. write_csr(dd, CCE_INT_MASK + (8 * i), 0);
  12076. write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
  12077. /* CCE_INT_FORCE leave alone */
  12078. /* CCE_INT_BLOCKED read-only */
  12079. }
  12080. for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
  12081. write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
  12082. }
  12083. /* set MISC CSRs to chip reset defaults */
  12084. static void reset_misc_csrs(struct hfi1_devdata *dd)
  12085. {
  12086. int i;
  12087. for (i = 0; i < 32; i++) {
  12088. write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
  12089. write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
  12090. write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
  12091. }
  12092. /*
  12093. * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
  12094. * only be written in 128-byte chunks
  12095. */
  12096. /* init RSA engine to clear lingering errors */
  12097. write_csr(dd, MISC_CFG_RSA_CMD, 1);
  12098. write_csr(dd, MISC_CFG_RSA_MU, 0);
  12099. write_csr(dd, MISC_CFG_FW_CTRL, 0);
  12100. /* MISC_STS_8051_DIGEST read-only */
  12101. /* MISC_STS_SBM_DIGEST read-only */
  12102. /* MISC_STS_PCIE_DIGEST read-only */
  12103. /* MISC_STS_FAB_DIGEST read-only */
  12104. /* MISC_ERR_STATUS read-only */
  12105. write_csr(dd, MISC_ERR_MASK, 0);
  12106. write_csr(dd, MISC_ERR_CLEAR, ~0ull);
  12107. /* MISC_ERR_FORCE leave alone */
  12108. }
  12109. /* set TXE CSRs to chip reset defaults */
  12110. static void reset_txe_csrs(struct hfi1_devdata *dd)
  12111. {
  12112. int i;
  12113. /*
  12114. * TXE Kernel CSRs
  12115. */
  12116. write_csr(dd, SEND_CTRL, 0);
  12117. __cm_reset(dd, 0); /* reset CM internal state */
  12118. /* SEND_CONTEXTS read-only */
  12119. /* SEND_DMA_ENGINES read-only */
  12120. /* SEND_PIO_MEM_SIZE read-only */
  12121. /* SEND_DMA_MEM_SIZE read-only */
  12122. write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
  12123. pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
  12124. /* SEND_PIO_ERR_STATUS read-only */
  12125. write_csr(dd, SEND_PIO_ERR_MASK, 0);
  12126. write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
  12127. /* SEND_PIO_ERR_FORCE leave alone */
  12128. /* SEND_DMA_ERR_STATUS read-only */
  12129. write_csr(dd, SEND_DMA_ERR_MASK, 0);
  12130. write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
  12131. /* SEND_DMA_ERR_FORCE leave alone */
  12132. /* SEND_EGRESS_ERR_STATUS read-only */
  12133. write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
  12134. write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
  12135. /* SEND_EGRESS_ERR_FORCE leave alone */
  12136. write_csr(dd, SEND_BTH_QP, 0);
  12137. write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
  12138. write_csr(dd, SEND_SC2VLT0, 0);
  12139. write_csr(dd, SEND_SC2VLT1, 0);
  12140. write_csr(dd, SEND_SC2VLT2, 0);
  12141. write_csr(dd, SEND_SC2VLT3, 0);
  12142. write_csr(dd, SEND_LEN_CHECK0, 0);
  12143. write_csr(dd, SEND_LEN_CHECK1, 0);
  12144. /* SEND_ERR_STATUS read-only */
  12145. write_csr(dd, SEND_ERR_MASK, 0);
  12146. write_csr(dd, SEND_ERR_CLEAR, ~0ull);
  12147. /* SEND_ERR_FORCE read-only */
  12148. for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
  12149. write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
  12150. for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
  12151. write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
  12152. for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
  12153. write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
  12154. for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
  12155. write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
  12156. for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
  12157. write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
  12158. write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
  12159. write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
  12160. /* SEND_CM_CREDIT_USED_STATUS read-only */
  12161. write_csr(dd, SEND_CM_TIMER_CTRL, 0);
  12162. write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
  12163. write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
  12164. write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
  12165. write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
  12166. for (i = 0; i < TXE_NUM_DATA_VL; i++)
  12167. write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
  12168. write_csr(dd, SEND_CM_CREDIT_VL15, 0);
  12169. /* SEND_CM_CREDIT_USED_VL read-only */
  12170. /* SEND_CM_CREDIT_USED_VL15 read-only */
  12171. /* SEND_EGRESS_CTXT_STATUS read-only */
  12172. /* SEND_EGRESS_SEND_DMA_STATUS read-only */
  12173. write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
  12174. /* SEND_EGRESS_ERR_INFO read-only */
  12175. /* SEND_EGRESS_ERR_SOURCE read-only */
  12176. /*
  12177. * TXE Per-Context CSRs
  12178. */
  12179. for (i = 0; i < chip_send_contexts(dd); i++) {
  12180. write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
  12181. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
  12182. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
  12183. write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
  12184. write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
  12185. write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
  12186. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
  12187. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
  12188. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
  12189. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
  12190. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
  12191. write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
  12192. }
  12193. /*
  12194. * TXE Per-SDMA CSRs
  12195. */
  12196. for (i = 0; i < chip_sdma_engines(dd); i++) {
  12197. write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
  12198. /* SEND_DMA_STATUS read-only */
  12199. write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
  12200. write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
  12201. write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
  12202. /* SEND_DMA_HEAD read-only */
  12203. write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
  12204. write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
  12205. /* SEND_DMA_IDLE_CNT read-only */
  12206. write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
  12207. write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
  12208. /* SEND_DMA_DESC_FETCHED_CNT read-only */
  12209. /* SEND_DMA_ENG_ERR_STATUS read-only */
  12210. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
  12211. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
  12212. /* SEND_DMA_ENG_ERR_FORCE leave alone */
  12213. write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
  12214. write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
  12215. write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
  12216. write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
  12217. write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
  12218. write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
  12219. write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
  12220. }
  12221. }
  12222. /*
  12223. * Expect on entry:
  12224. * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
  12225. */
  12226. static void init_rbufs(struct hfi1_devdata *dd)
  12227. {
  12228. u64 reg;
  12229. int count;
  12230. /*
  12231. * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
  12232. * clear.
  12233. */
  12234. count = 0;
  12235. while (1) {
  12236. reg = read_csr(dd, RCV_STATUS);
  12237. if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
  12238. | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
  12239. break;
  12240. /*
  12241. * Give up after 1ms - maximum wait time.
  12242. *
  12243. * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
  12244. * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
  12245. * 136 KB / (66% * 250MB/s) = 844us
  12246. */
  12247. if (count++ > 500) {
  12248. dd_dev_err(dd,
  12249. "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
  12250. __func__, reg);
  12251. break;
  12252. }
  12253. udelay(2); /* do not busy-wait the CSR */
  12254. }
  12255. /* start the init - expect RcvCtrl to be 0 */
  12256. write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
  12257. /*
  12258. * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
  12259. * period after the write before RcvStatus.RxRbufInitDone is valid.
  12260. * The delay in the first run through the loop below is sufficient and
  12261. * required before the first read of RcvStatus.RxRbufInitDone.
  12262. */
  12263. read_csr(dd, RCV_CTRL);
  12264. /* wait for the init to finish */
  12265. count = 0;
  12266. while (1) {
  12267. /* delay is required first time through - see above */
  12268. udelay(2); /* do not busy-wait the CSR */
  12269. reg = read_csr(dd, RCV_STATUS);
  12270. if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
  12271. break;
  12272. /* give up after 100us - slowest possible at 33MHz is 73us */
  12273. if (count++ > 50) {
  12274. dd_dev_err(dd,
  12275. "%s: RcvStatus.RxRbufInit not set, continuing\n",
  12276. __func__);
  12277. break;
  12278. }
  12279. }
  12280. }
  12281. /* set RXE CSRs to chip reset defaults */
  12282. static void reset_rxe_csrs(struct hfi1_devdata *dd)
  12283. {
  12284. int i, j;
  12285. /*
  12286. * RXE Kernel CSRs
  12287. */
  12288. write_csr(dd, RCV_CTRL, 0);
  12289. init_rbufs(dd);
  12290. /* RCV_STATUS read-only */
  12291. /* RCV_CONTEXTS read-only */
  12292. /* RCV_ARRAY_CNT read-only */
  12293. /* RCV_BUF_SIZE read-only */
  12294. write_csr(dd, RCV_BTH_QP, 0);
  12295. write_csr(dd, RCV_MULTICAST, 0);
  12296. write_csr(dd, RCV_BYPASS, 0);
  12297. write_csr(dd, RCV_VL15, 0);
  12298. /* this is a clear-down */
  12299. write_csr(dd, RCV_ERR_INFO,
  12300. RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
  12301. /* RCV_ERR_STATUS read-only */
  12302. write_csr(dd, RCV_ERR_MASK, 0);
  12303. write_csr(dd, RCV_ERR_CLEAR, ~0ull);
  12304. /* RCV_ERR_FORCE leave alone */
  12305. for (i = 0; i < 32; i++)
  12306. write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
  12307. for (i = 0; i < 4; i++)
  12308. write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
  12309. for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
  12310. write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
  12311. for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
  12312. write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
  12313. for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
  12314. clear_rsm_rule(dd, i);
  12315. for (i = 0; i < 32; i++)
  12316. write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
  12317. /*
  12318. * RXE Kernel and User Per-Context CSRs
  12319. */
  12320. for (i = 0; i < chip_rcv_contexts(dd); i++) {
  12321. /* kernel */
  12322. write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
  12323. /* RCV_CTXT_STATUS read-only */
  12324. write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
  12325. write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
  12326. write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
  12327. write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
  12328. write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
  12329. write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
  12330. write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
  12331. write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
  12332. write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
  12333. write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
  12334. /* user */
  12335. /* RCV_HDR_TAIL read-only */
  12336. write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
  12337. /* RCV_EGR_INDEX_TAIL read-only */
  12338. write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
  12339. /* RCV_EGR_OFFSET_TAIL read-only */
  12340. for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
  12341. write_uctxt_csr(dd, i,
  12342. RCV_TID_FLOW_TABLE + (8 * j), 0);
  12343. }
  12344. }
  12345. }
  12346. /*
  12347. * Set sc2vl tables.
  12348. *
  12349. * They power on to zeros, so to avoid send context errors
  12350. * they need to be set:
  12351. *
  12352. * SC 0-7 -> VL 0-7 (respectively)
  12353. * SC 15 -> VL 15
  12354. * otherwise
  12355. * -> VL 0
  12356. */
  12357. static void init_sc2vl_tables(struct hfi1_devdata *dd)
  12358. {
  12359. int i;
  12360. /* init per architecture spec, constrained by hardware capability */
  12361. /* HFI maps sent packets */
  12362. write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
  12363. 0,
  12364. 0, 0, 1, 1,
  12365. 2, 2, 3, 3,
  12366. 4, 4, 5, 5,
  12367. 6, 6, 7, 7));
  12368. write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
  12369. 1,
  12370. 8, 0, 9, 0,
  12371. 10, 0, 11, 0,
  12372. 12, 0, 13, 0,
  12373. 14, 0, 15, 15));
  12374. write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
  12375. 2,
  12376. 16, 0, 17, 0,
  12377. 18, 0, 19, 0,
  12378. 20, 0, 21, 0,
  12379. 22, 0, 23, 0));
  12380. write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
  12381. 3,
  12382. 24, 0, 25, 0,
  12383. 26, 0, 27, 0,
  12384. 28, 0, 29, 0,
  12385. 30, 0, 31, 0));
  12386. /* DC maps received packets */
  12387. write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
  12388. 15_0,
  12389. 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
  12390. 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
  12391. write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
  12392. 31_16,
  12393. 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
  12394. 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
  12395. /* initialize the cached sc2vl values consistently with h/w */
  12396. for (i = 0; i < 32; i++) {
  12397. if (i < 8 || i == 15)
  12398. *((u8 *)(dd->sc2vl) + i) = (u8)i;
  12399. else
  12400. *((u8 *)(dd->sc2vl) + i) = 0;
  12401. }
  12402. }
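/*
 * Illustrative sketch, not part of the driver: the cached sc2vl values
 * written above follow the mapping described in the comment before
 * init_sc2vl_tables() (SC 0-7 -> VL 0-7, SC 15 -> VL 15, everything else
 * -> VL 0), so a default lookup reduces to the helper below; its name is
 * an assumption for the example.
 */
static u8 example_default_sc_to_vl(u8 sc)
{
	if (sc < 8 || sc == 15)
		return sc;
	return 0;
}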
  12403. /*
  12404. * Read chip sizes and then reset parts to sane, disabled values. We cannot
  12405. * depend on the chip going through a power-on reset - a driver may be loaded
  12406. * and unloaded many times.
  12407. *
  12408. * Do not write any CSR values to the chip in this routine - there may be
  12409. * a reset following the (possible) FLR in this routine.
  12410. *
  12411. */
  12412. static int init_chip(struct hfi1_devdata *dd)
  12413. {
  12414. int i;
  12415. int ret = 0;
  12416. /*
  12417. * Put the HFI CSRs in a known state.
  12418. * Combine this with a DC reset.
  12419. *
  12420. * Stop the device from doing anything while we do a
  12421. * reset. We know there are no other active users of
  12422. * the device since we are now in charge. Turn off
  12423. * all outbound and inbound traffic and make sure
  12424. * the device does not generate any interrupts.
  12425. */
  12426. /* disable send contexts and SDMA engines */
  12427. write_csr(dd, SEND_CTRL, 0);
  12428. for (i = 0; i < chip_send_contexts(dd); i++)
  12429. write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
  12430. for (i = 0; i < chip_sdma_engines(dd); i++)
  12431. write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
  12432. /* disable port (turn off RXE inbound traffic) and contexts */
  12433. write_csr(dd, RCV_CTRL, 0);
  12434. for (i = 0; i < chip_rcv_contexts(dd); i++)
  12435. write_csr(dd, RCV_CTXT_CTRL, 0);
  12436. /* mask all interrupt sources */
  12437. for (i = 0; i < CCE_NUM_INT_CSRS; i++)
  12438. write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
  12439. /*
  12440. * DC Reset: do a full DC reset before the register clear.
  12441. * A recommended length of time to hold is one CSR read,
  12442. * so reread the CceDcCtrl. Then, hold the DC in reset
  12443. * across the clear.
  12444. */
  12445. write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
  12446. (void)read_csr(dd, CCE_DC_CTRL);
  12447. if (use_flr) {
  12448. /*
  12449. * A FLR will reset the SPC core and part of the PCIe.
  12450. * The parts that need to be restored have already been
  12451. * saved.
  12452. */
  12453. dd_dev_info(dd, "Resetting CSRs with FLR\n");
  12454. /* do the FLR, the DC reset will remain */
  12455. pcie_flr(dd->pcidev);
  12456. /* restore command and BARs */
  12457. ret = restore_pci_variables(dd);
  12458. if (ret) {
  12459. dd_dev_err(dd, "%s: Could not restore PCI variables\n",
  12460. __func__);
  12461. return ret;
  12462. }
  12463. if (is_ax(dd)) {
  12464. dd_dev_info(dd, "Resetting CSRs with FLR\n");
  12465. pcie_flr(dd->pcidev);
  12466. ret = restore_pci_variables(dd);
  12467. if (ret) {
  12468. dd_dev_err(dd, "%s: Could not restore PCI variables\n",
  12469. __func__);
  12470. return ret;
  12471. }
  12472. }
  12473. } else {
  12474. dd_dev_info(dd, "Resetting CSRs with writes\n");
  12475. reset_cce_csrs(dd);
  12476. reset_txe_csrs(dd);
  12477. reset_rxe_csrs(dd);
  12478. reset_misc_csrs(dd);
  12479. }
  12480. /* clear the DC reset */
  12481. write_csr(dd, CCE_DC_CTRL, 0);
  12482. /* Set the LED off */
  12483. setextled(dd, 0);
  12484. /*
  12485. * Clear the QSFP reset.
  12486. * An FLR enforces a 0 on all out pins. The driver does not touch
  12487. * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
 * anything plugged in constantly in reset, if it pays attention
  12489. * to RESET_N.
  12490. * Prime examples of this are optical cables. Set all pins high.
  12491. * I2CCLK and I2CDAT will change per direction, and INT_N and
  12492. * MODPRS_N are input only and their value is ignored.
  12493. */
  12494. write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
  12495. write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
  12496. init_chip_resources(dd);
  12497. return ret;
  12498. }
  12499. static void init_early_variables(struct hfi1_devdata *dd)
  12500. {
  12501. int i;
  12502. /* assign link credit variables */
  12503. dd->vau = CM_VAU;
  12504. dd->link_credits = CM_GLOBAL_CREDITS;
  12505. if (is_ax(dd))
  12506. dd->link_credits--;
  12507. dd->vcu = cu_to_vcu(hfi1_cu);
	/* enough room for 8 MAD packets plus header: 8 * (2048 + 128) = ~17K */
  12509. dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
  12510. if (dd->vl15_init > dd->link_credits)
  12511. dd->vl15_init = dd->link_credits;
  12512. write_uninitialized_csrs_and_memories(dd);
  12513. if (HFI1_CAP_IS_KSET(PKEY_CHECK))
  12514. for (i = 0; i < dd->num_pports; i++) {
  12515. struct hfi1_pportdata *ppd = &dd->pport[i];
  12516. set_partition_keys(ppd);
  12517. }
  12518. init_sc2vl_tables(dd);
  12519. }
  12520. static void init_kdeth_qp(struct hfi1_devdata *dd)
  12521. {
  12522. /* user changed the KDETH_QP */
  12523. if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
  12524. /* out of range or illegal value */
  12525. dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
  12526. kdeth_qp = 0;
  12527. }
  12528. if (kdeth_qp == 0) /* not set, or failed range check */
  12529. kdeth_qp = DEFAULT_KDETH_QP;
  12530. write_csr(dd, SEND_BTH_QP,
  12531. (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
  12532. SEND_BTH_QP_KDETH_QP_SHIFT);
  12533. write_csr(dd, RCV_BTH_QP,
  12534. (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
  12535. RCV_BTH_QP_KDETH_QP_SHIFT);
  12536. }
  12537. /**
  12538. * init_qpmap_table
  12539. * @dd - device data
  12540. * @first_ctxt - first context
 * @last_ctxt - last context
  12542. *
 * This routine sets the qpn mapping table that
  12544. * is indexed by qpn[8:1].
  12545. *
  12546. * The routine will round robin the 256 settings
  12547. * from first_ctxt to last_ctxt.
  12548. *
  12549. * The first/last looks ahead to having specialized
  12550. * receive contexts for mgmt and bypass. Normal
 * verbs traffic is assumed to be on a range
  12552. * of receive contexts.
  12553. */
  12554. static void init_qpmap_table(struct hfi1_devdata *dd,
  12555. u32 first_ctxt,
  12556. u32 last_ctxt)
  12557. {
  12558. u64 reg = 0;
  12559. u64 regno = RCV_QP_MAP_TABLE;
  12560. int i;
  12561. u64 ctxt = first_ctxt;
  12562. for (i = 0; i < 256; i++) {
  12563. reg |= ctxt << (8 * (i % 8));
  12564. ctxt++;
  12565. if (ctxt > last_ctxt)
  12566. ctxt = first_ctxt;
  12567. if (i % 8 == 7) {
  12568. write_csr(dd, regno, reg);
  12569. reg = 0;
  12570. regno += 8;
  12571. }
  12572. }
  12573. add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
  12574. | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
  12575. }
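/*
 * Illustrative sketch, not part of the driver: given the round-robin fill
 * above, the receive context chosen for a QPN can be predicted from the
 * table index qpn[8:1]. Each 64-bit RcvQPMapTable register holds 8 one-byte
 * entries, so entry i lives in register i / 8 at byte offset i % 8. The
 * helper name and its stand-alone form are assumptions for illustration.
 */
static inline u32 example_qpmap_lookup(u32 qpn, u32 first_ctxt, u32 last_ctxt)
{
	u32 idx = (qpn >> 1) & 0xff;		/* table index is qpn[8:1] */
	u32 nctxts = last_ctxt - first_ctxt + 1;

	return first_ctxt + (idx % nctxts);	/* round-robin assignment */
}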
  12576. struct rsm_map_table {
  12577. u64 map[NUM_MAP_REGS];
  12578. unsigned int used;
  12579. };
  12580. struct rsm_rule_data {
  12581. u8 offset;
  12582. u8 pkt_type;
  12583. u32 field1_off;
  12584. u32 field2_off;
  12585. u32 index1_off;
  12586. u32 index1_width;
  12587. u32 index2_off;
  12588. u32 index2_width;
  12589. u32 mask1;
  12590. u32 value1;
  12591. u32 mask2;
  12592. u32 value2;
  12593. };
  12594. /*
  12595. * Return an initialized RMT map table for users to fill in. OK if it
  12596. * returns NULL, indicating no table.
  12597. */
  12598. static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
  12599. {
  12600. struct rsm_map_table *rmt;
  12601. u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
  12602. rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
  12603. if (rmt) {
  12604. memset(rmt->map, rxcontext, sizeof(rmt->map));
  12605. rmt->used = 0;
  12606. }
  12607. return rmt;
  12608. }
  12609. /*
 * Write the final RMT map table to the chip. The caller retains ownership
 * of the table and is responsible for freeing it. OK if table is NULL.
  12612. */
  12613. static void complete_rsm_map_table(struct hfi1_devdata *dd,
  12614. struct rsm_map_table *rmt)
  12615. {
  12616. int i;
  12617. if (rmt) {
  12618. /* write table to chip */
  12619. for (i = 0; i < NUM_MAP_REGS; i++)
  12620. write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
  12621. /* enable RSM */
  12622. add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
  12623. }
  12624. }
  12625. /*
  12626. * Add a receive side mapping rule.
  12627. */
  12628. static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
  12629. struct rsm_rule_data *rrd)
  12630. {
  12631. write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
  12632. (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
  12633. 1ull << rule_index | /* enable bit */
  12634. (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
  12635. write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
  12636. (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
  12637. (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
  12638. (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
  12639. (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
  12640. (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
  12641. (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
  12642. write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
  12643. (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
  12644. (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
  12645. (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
  12646. (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
  12647. }
  12648. /*
  12649. * Clear a receive side mapping rule.
  12650. */
  12651. static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
  12652. {
  12653. write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
  12654. write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
  12655. write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
  12656. }
  12657. /* return the number of RSM map table entries that will be used for QOS */
  12658. static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
  12659. unsigned int *np)
  12660. {
  12661. int i;
  12662. unsigned int m, n;
  12663. u8 max_by_vl = 0;
  12664. /* is QOS active at all? */
  12665. if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
  12666. num_vls == 1 ||
  12667. krcvqsset <= 1)
  12668. goto no_qos;
  12669. /* determine bits for qpn */
  12670. for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
  12671. if (krcvqs[i] > max_by_vl)
  12672. max_by_vl = krcvqs[i];
  12673. if (max_by_vl > 32)
  12674. goto no_qos;
  12675. m = ilog2(__roundup_pow_of_two(max_by_vl));
  12676. /* determine bits for vl */
  12677. n = ilog2(__roundup_pow_of_two(num_vls));
  12678. /* reject if too much is used */
  12679. if ((m + n) > 7)
  12680. goto no_qos;
  12681. if (mp)
  12682. *mp = m;
  12683. if (np)
  12684. *np = n;
  12685. return 1 << (m + n);
  12686. no_qos:
  12687. if (mp)
  12688. *mp = 0;
  12689. if (np)
  12690. *np = 0;
  12691. return 0;
  12692. }
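/*
 * Worked example (illustrative, values assumed): with num_vls = 8 and
 * krcvqs[] = { 4, 4, 4, 4, 4, 4, 4, 4 }, max_by_vl = 4, so m = ilog2(4) = 2
 * qpn bits and n = ilog2(8) = 3 vl bits. Since m + n = 5 <= 7, QOS stays
 * enabled and qos_rmt_entries() returns 1 << 5 = 32 map table entries.
 */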
  12693. /**
  12694. * init_qos - init RX qos
  12695. * @dd - device data
  12696. * @rmt - RSM map table
  12697. *
  12698. * This routine initializes Rule 0 and the RSM map table to implement
  12699. * quality of service (qos).
  12700. *
  12701. * If all of the limit tests succeed, qos is applied based on the array
  12702. * interpretation of krcvqs where entry 0 is VL0.
  12703. *
  12704. * The number of vl bits (n) and the number of qpn bits (m) are computed to
  12705. * feed both the RSM map table and the single rule.
  12706. */
  12707. static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
  12708. {
  12709. struct rsm_rule_data rrd;
  12710. unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
  12711. unsigned int rmt_entries;
  12712. u64 reg;
  12713. if (!rmt)
  12714. goto bail;
  12715. rmt_entries = qos_rmt_entries(dd, &m, &n);
  12716. if (rmt_entries == 0)
  12717. goto bail;
  12718. qpns_per_vl = 1 << m;
  12719. /* enough room in the map table? */
  12720. rmt_entries = 1 << (m + n);
  12721. if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
  12722. goto bail;
	/* add qos entries to the RSM map table */
  12724. for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
  12725. unsigned tctxt;
  12726. for (qpn = 0, tctxt = ctxt;
  12727. krcvqs[i] && qpn < qpns_per_vl; qpn++) {
  12728. unsigned idx, regoff, regidx;
  12729. /* generate the index the hardware will produce */
  12730. idx = rmt->used + ((qpn << n) ^ i);
  12731. regoff = (idx % 8) * 8;
  12732. regidx = idx / 8;
  12733. /* replace default with context number */
  12734. reg = rmt->map[regidx];
  12735. reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
  12736. << regoff);
  12737. reg |= (u64)(tctxt++) << regoff;
  12738. rmt->map[regidx] = reg;
  12739. if (tctxt == ctxt + krcvqs[i])
  12740. tctxt = ctxt;
  12741. }
  12742. ctxt += krcvqs[i];
  12743. }
  12744. rrd.offset = rmt->used;
  12745. rrd.pkt_type = 2;
  12746. rrd.field1_off = LRH_BTH_MATCH_OFFSET;
  12747. rrd.field2_off = LRH_SC_MATCH_OFFSET;
  12748. rrd.index1_off = LRH_SC_SELECT_OFFSET;
  12749. rrd.index1_width = n;
  12750. rrd.index2_off = QPN_SELECT_OFFSET;
  12751. rrd.index2_width = m + n;
  12752. rrd.mask1 = LRH_BTH_MASK;
  12753. rrd.value1 = LRH_BTH_VALUE;
  12754. rrd.mask2 = LRH_SC_MASK;
  12755. rrd.value2 = LRH_SC_VALUE;
  12756. /* add rule 0 */
  12757. add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
  12758. /* mark RSM map entries as used */
  12759. rmt->used += rmt_entries;
  12760. /* map everything else to the mcast/err/vl15 context */
  12761. init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
  12762. dd->qos_shift = n + 1;
  12763. return;
  12764. bail:
  12765. dd->qos_shift = 1;
  12766. init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
  12767. }
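/*
 * Illustrative sketch, not part of the driver: init_qos() fills the RSM map
 * table to anticipate the index rule 0 will produce, i.e. the low m QPN bits
 * shifted above the n VL bits and XORed with the VL number, offset by the
 * first entry this rule owns. The helper below repeats that index
 * arithmetic; the name and stand-alone form are assumptions for
 * illustration only.
 */
static inline u32 example_qos_rmt_index(u32 rmt_used, u32 vl, u32 qpn,
					u32 m, u32 n)
{
	u32 qpn_bits = qpn & ((1u << m) - 1);	/* low m bits of the QPN */

	return rmt_used + ((qpn_bits << n) ^ vl);
}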
  12768. static void init_user_fecn_handling(struct hfi1_devdata *dd,
  12769. struct rsm_map_table *rmt)
  12770. {
  12771. struct rsm_rule_data rrd;
  12772. u64 reg;
  12773. int i, idx, regoff, regidx;
  12774. u8 offset;
  12775. u32 total_cnt;
  12776. /* there needs to be enough room in the map table */
  12777. total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
  12778. if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
  12779. dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
  12780. return;
  12781. }
  12782. /*
  12783. * RSM will extract the destination context as an index into the
  12784. * map table. The destination contexts are a sequential block
  12785. * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
  12786. * Map entries are accessed as offset + extracted value. Adjust
  12787. * the added offset so this sequence can be placed anywhere in
  12788. * the table - as long as the entries themselves do not wrap.
  12789. * There are only enough bits in offset for the table size, so
  12790. * start with that to allow for a "negative" offset.
  12791. */
  12792. offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
  12793. (int)dd->first_dyn_alloc_ctxt);
  12794. for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
  12795. i < dd->num_rcv_contexts; i++, idx++) {
  12796. /* replace with identity mapping */
  12797. regoff = (idx % 8) * 8;
  12798. regidx = idx / 8;
  12799. reg = rmt->map[regidx];
  12800. reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
  12801. reg |= (u64)i << regoff;
  12802. rmt->map[regidx] = reg;
  12803. }
  12804. /*
  12805. * For RSM intercept of Expected FECN packets:
  12806. * o packet type 0 - expected
  12807. * o match on F (bit 95), using select/match 1, and
  12808. * o match on SH (bit 133), using select/match 2.
  12809. *
  12810. * Use index 1 to extract the 8-bit receive context from DestQP
  12811. * (start at bit 64). Use that as the RSM map table index.
  12812. */
  12813. rrd.offset = offset;
  12814. rrd.pkt_type = 0;
  12815. rrd.field1_off = 95;
  12816. rrd.field2_off = 133;
  12817. rrd.index1_off = 64;
  12818. rrd.index1_width = 8;
  12819. rrd.index2_off = 0;
  12820. rrd.index2_width = 0;
  12821. rrd.mask1 = 1;
  12822. rrd.value1 = 1;
  12823. rrd.mask2 = 1;
  12824. rrd.value2 = 1;
  12825. /* add rule 1 */
  12826. add_rsm_rule(dd, RSM_INS_FECN, &rrd);
  12827. rmt->used += total_cnt;
  12828. }
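/*
 * Worked example for the offset computed above (illustrative, values
 * assumed): with NUM_MAP_ENTRIES = 256, rmt->used = 4 and
 * first_dyn_alloc_ctxt = 8, offset = (u8)(256 + 4 - 8) = 252. When RSM adds
 * the extracted context to the offset (modulo the 8-bit table index), user
 * context 8 yields (252 + 8) % 256 = 4, the first map entry this rule owns,
 * and the later user contexts follow sequentially without wrapping.
 */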
  12829. /* Initialize RSM for VNIC */
  12830. void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
  12831. {
  12832. u8 i, j;
  12833. u8 ctx_id = 0;
  12834. u64 reg;
  12835. u32 regoff;
  12836. struct rsm_rule_data rrd;
  12837. if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
  12838. dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
  12839. dd->vnic.rmt_start);
  12840. return;
  12841. }
  12842. dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
  12843. dd->vnic.rmt_start,
  12844. dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
  12845. /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
  12846. regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
  12847. reg = read_csr(dd, regoff);
  12848. for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
  12849. /* Update map register with vnic context */
  12850. j = (dd->vnic.rmt_start + i) % 8;
  12851. reg &= ~(0xffllu << (j * 8));
  12852. reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
  12853. /* Wrap up vnic ctx index */
  12854. ctx_id %= dd->vnic.num_ctxt;
  12855. /* Write back map register */
  12856. if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
  12857. dev_dbg(&(dd)->pcidev->dev,
  12858. "Vnic rsm map reg[%d] =0x%llx\n",
  12859. regoff - RCV_RSM_MAP_TABLE, reg);
  12860. write_csr(dd, regoff, reg);
  12861. regoff += 8;
  12862. if (i < (NUM_VNIC_MAP_ENTRIES - 1))
  12863. reg = read_csr(dd, regoff);
  12864. }
  12865. }
  12866. /* Add rule for vnic */
  12867. rrd.offset = dd->vnic.rmt_start;
  12868. rrd.pkt_type = 4;
  12869. /* Match 16B packets */
  12870. rrd.field1_off = L2_TYPE_MATCH_OFFSET;
  12871. rrd.mask1 = L2_TYPE_MASK;
  12872. rrd.value1 = L2_16B_VALUE;
  12873. /* Match ETH L4 packets */
  12874. rrd.field2_off = L4_TYPE_MATCH_OFFSET;
  12875. rrd.mask2 = L4_16B_TYPE_MASK;
  12876. rrd.value2 = L4_16B_ETH_VALUE;
  12877. /* Calc context from veswid and entropy */
  12878. rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
  12879. rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
  12880. rrd.index2_off = L2_16B_ENTROPY_OFFSET;
  12881. rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
  12882. add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
  12883. /* Enable RSM if not already enabled */
  12884. add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
  12885. }
  12886. void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
  12887. {
  12888. clear_rsm_rule(dd, RSM_INS_VNIC);
  12889. /* Disable RSM if used only by vnic */
  12890. if (dd->vnic.rmt_start == 0)
  12891. clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
  12892. }
  12893. static int init_rxe(struct hfi1_devdata *dd)
  12894. {
  12895. struct rsm_map_table *rmt;
  12896. u64 val;
  12897. /* enable all receive errors */
  12898. write_csr(dd, RCV_ERR_MASK, ~0ull);
  12899. rmt = alloc_rsm_map_table(dd);
  12900. if (!rmt)
  12901. return -ENOMEM;
  12902. /* set up QOS, including the QPN map table */
  12903. init_qos(dd, rmt);
  12904. init_user_fecn_handling(dd, rmt);
  12905. complete_rsm_map_table(dd, rmt);
  12906. /* record number of used rsm map entries for vnic */
  12907. dd->vnic.rmt_start = rmt->used;
  12908. kfree(rmt);
  12909. /*
  12910. * make sure RcvCtrl.RcvWcb <= PCIe Device Control
  12911. * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
  12912. * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
  12913. * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
  12914. * Max_PayLoad_Size set to its minimum of 128.
  12915. *
  12916. * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
  12917. * (64 bytes). Max_Payload_Size is possibly modified upward in
  12918. * tune_pcie_caps() which is called after this routine.
  12919. */
  12920. /* Have 16 bytes (4DW) of bypass header available in header queue */
  12921. val = read_csr(dd, RCV_BYPASS);
  12922. val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
  12923. val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
  12924. RCV_BYPASS_HDR_SIZE_SHIFT);
  12925. write_csr(dd, RCV_BYPASS, val);
  12926. return 0;
  12927. }
  12928. static void init_other(struct hfi1_devdata *dd)
  12929. {
  12930. /* enable all CCE errors */
  12931. write_csr(dd, CCE_ERR_MASK, ~0ull);
  12932. /* enable *some* Misc errors */
  12933. write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
  12934. /* enable all DC errors, except LCB */
  12935. write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
  12936. write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
  12937. }
  12938. /*
 * Fill out the given AU table using the given CU. A CU is defined in terms
 * of AUs. The table is an encoding: given the index, how many AUs does that
  12941. * represent?
  12942. *
  12943. * NOTE: Assumes that the register layout is the same for the
  12944. * local and remote tables.
  12945. */
  12946. static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
  12947. u32 csr0to3, u32 csr4to7)
  12948. {
  12949. write_csr(dd, csr0to3,
  12950. 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
  12951. 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
  12952. 2ull * cu <<
  12953. SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
  12954. 4ull * cu <<
  12955. SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
  12956. write_csr(dd, csr4to7,
  12957. 8ull * cu <<
  12958. SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
  12959. 16ull * cu <<
  12960. SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
  12961. 32ull * cu <<
  12962. SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
  12963. 64ull * cu <<
  12964. SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
  12965. }
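/*
 * Worked example (illustrative): the writes above encode, per table index,
 * a number of AUs: { 0, 1, 2*cu, 4*cu, 8*cu, 16*cu, 32*cu, 64*cu }.
 * Assuming vcu_to_cu() yields cu = 1 AU per CU for the smallest vCU, the
 * local table would read { 0, 1, 2, 4, 8, 16, 32, 64 } AUs.
 */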
  12966. static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
  12967. {
  12968. assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
  12969. SEND_CM_LOCAL_AU_TABLE4_TO7);
  12970. }
  12971. void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
  12972. {
  12973. assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
  12974. SEND_CM_REMOTE_AU_TABLE4_TO7);
  12975. }
  12976. static void init_txe(struct hfi1_devdata *dd)
  12977. {
  12978. int i;
  12979. /* enable all PIO, SDMA, general, and Egress errors */
  12980. write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
  12981. write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
  12982. write_csr(dd, SEND_ERR_MASK, ~0ull);
  12983. write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
  12984. /* enable all per-context and per-SDMA engine errors */
  12985. for (i = 0; i < chip_send_contexts(dd); i++)
  12986. write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
  12987. for (i = 0; i < chip_sdma_engines(dd); i++)
  12988. write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
  12989. /* set the local CU to AU mapping */
  12990. assign_local_cm_au_table(dd, dd->vcu);
  12991. /*
  12992. * Set reasonable default for Credit Return Timer
  12993. * Don't set on Simulator - causes it to choke.
  12994. */
  12995. if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
  12996. write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
  12997. }
  12998. int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
  12999. u16 jkey)
  13000. {
  13001. u8 hw_ctxt;
  13002. u64 reg;
  13003. if (!rcd || !rcd->sc)
  13004. return -EINVAL;
  13005. hw_ctxt = rcd->sc->hw_context;
  13006. reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
  13007. ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
  13008. SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
  13009. /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
  13010. if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
  13011. reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
  13012. write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
  13013. /*
  13014. * Enable send-side J_KEY integrity check, unless this is A0 h/w
  13015. */
  13016. if (!is_ax(dd)) {
  13017. reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
  13018. reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
  13019. write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
  13020. }
  13021. /* Enable J_KEY check on receive context. */
  13022. reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
  13023. ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
  13024. RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
  13025. write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
  13026. return 0;
  13027. }
  13028. int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
  13029. {
  13030. u8 hw_ctxt;
  13031. u64 reg;
  13032. if (!rcd || !rcd->sc)
  13033. return -EINVAL;
  13034. hw_ctxt = rcd->sc->hw_context;
  13035. write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
  13036. /*
  13037. * Disable send-side J_KEY integrity check, unless this is A0 h/w.
  13038. * This check would not have been enabled for A0 h/w, see
  13039. * set_ctxt_jkey().
  13040. */
  13041. if (!is_ax(dd)) {
  13042. reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
  13043. reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
  13044. write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
  13045. }
  13046. /* Turn off the J_KEY on the receive side */
  13047. write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
  13048. return 0;
  13049. }
  13050. int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
  13051. u16 pkey)
  13052. {
  13053. u8 hw_ctxt;
  13054. u64 reg;
  13055. if (!rcd || !rcd->sc)
  13056. return -EINVAL;
  13057. hw_ctxt = rcd->sc->hw_context;
  13058. reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
  13059. SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
  13060. write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
  13061. reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
  13062. reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
  13063. reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
  13064. write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
  13065. return 0;
  13066. }
  13067. int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
  13068. {
  13069. u8 hw_ctxt;
  13070. u64 reg;
  13071. if (!ctxt || !ctxt->sc)
  13072. return -EINVAL;
  13073. hw_ctxt = ctxt->sc->hw_context;
  13074. reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
  13075. reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
  13076. write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
  13077. write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
  13078. return 0;
  13079. }
  13080. /*
 * Start doing the clean up of the chip. Our clean up happens in multiple
  13082. * stages and this is just the first.
  13083. */
  13084. void hfi1_start_cleanup(struct hfi1_devdata *dd)
  13085. {
  13086. aspm_exit(dd);
  13087. free_cntrs(dd);
  13088. free_rcverr(dd);
  13089. finish_chip_resources(dd);
  13090. }
  13091. #define HFI_BASE_GUID(dev) \
  13092. ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
  13093. /*
  13094. * Information can be shared between the two HFIs on the same ASIC
  13095. * in the same OS. This function finds the peer device and sets
  13096. * up a shared structure.
  13097. */
  13098. static int init_asic_data(struct hfi1_devdata *dd)
  13099. {
  13100. unsigned long flags;
  13101. struct hfi1_devdata *tmp, *peer = NULL;
  13102. struct hfi1_asic_data *asic_data;
  13103. int ret = 0;
  13104. /* pre-allocate the asic structure in case we are the first device */
  13105. asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
  13106. if (!asic_data)
  13107. return -ENOMEM;
  13108. spin_lock_irqsave(&hfi1_devs_lock, flags);
  13109. /* Find our peer device */
  13110. list_for_each_entry(tmp, &hfi1_dev_list, list) {
  13111. if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
  13112. dd->unit != tmp->unit) {
  13113. peer = tmp;
  13114. break;
  13115. }
  13116. }
  13117. if (peer) {
  13118. /* use already allocated structure */
  13119. dd->asic_data = peer->asic_data;
  13120. kfree(asic_data);
  13121. } else {
  13122. dd->asic_data = asic_data;
  13123. mutex_init(&dd->asic_data->asic_resource_mutex);
  13124. }
  13125. dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
  13126. spin_unlock_irqrestore(&hfi1_devs_lock, flags);
  13127. /* first one through - set up i2c devices */
  13128. if (!peer)
  13129. ret = set_up_i2c(dd, dd->asic_data);
  13130. return ret;
  13131. }
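/*
 * Illustrative sketch, not part of the driver: HFI_BASE_GUID() masks out the
 * per-HFI index bit, so the two devices on one ASIC compare equal on their
 * base GUID while still differing in dd->unit. The helper below shows the
 * same comparison on plain values; the name and stand-alone form are
 * assumptions for illustration only.
 */
static inline int example_same_asic(u64 guid_a, u64 guid_b, u32 index_shift)
{
	u64 base_mask = ~(1ULL << index_shift);	/* drop the HFI index bit */

	return (guid_a & base_mask) == (guid_b & base_mask);
}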
  13132. /*
  13133. * Set dd->boardname. Use a generic name if a name is not returned from
  13134. * EFI variable space.
  13135. *
  13136. * Return 0 on success, -ENOMEM if space could not be allocated.
  13137. */
  13138. static int obtain_boardname(struct hfi1_devdata *dd)
  13139. {
  13140. /* generic board description */
  13141. const char generic[] =
  13142. "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
  13143. unsigned long size;
  13144. int ret;
  13145. ret = read_hfi1_efi_var(dd, "description", &size,
  13146. (void **)&dd->boardname);
  13147. if (ret) {
  13148. dd_dev_info(dd, "Board description not found\n");
  13149. /* use generic description */
  13150. dd->boardname = kstrdup(generic, GFP_KERNEL);
  13151. if (!dd->boardname)
  13152. return -ENOMEM;
  13153. }
  13154. return 0;
  13155. }
  13156. /*
  13157. * Check the interrupt registers to make sure that they are mapped correctly.
 * It is intended to help the user identify any mismapping by the VMM when
 * the driver is running in a VM. This function should only be called before
 * interrupts are set up properly.
  13161. *
  13162. * Return 0 on success, -EINVAL on failure.
  13163. */
  13164. static int check_int_registers(struct hfi1_devdata *dd)
  13165. {
  13166. u64 reg;
  13167. u64 all_bits = ~(u64)0;
  13168. u64 mask;
  13169. /* Clear CceIntMask[0] to avoid raising any interrupts */
  13170. mask = read_csr(dd, CCE_INT_MASK);
  13171. write_csr(dd, CCE_INT_MASK, 0ull);
  13172. reg = read_csr(dd, CCE_INT_MASK);
  13173. if (reg)
  13174. goto err_exit;
  13175. /* Clear all interrupt status bits */
  13176. write_csr(dd, CCE_INT_CLEAR, all_bits);
  13177. reg = read_csr(dd, CCE_INT_STATUS);
  13178. if (reg)
  13179. goto err_exit;
  13180. /* Set all interrupt status bits */
  13181. write_csr(dd, CCE_INT_FORCE, all_bits);
  13182. reg = read_csr(dd, CCE_INT_STATUS);
  13183. if (reg != all_bits)
  13184. goto err_exit;
  13185. /* Restore the interrupt mask */
  13186. write_csr(dd, CCE_INT_CLEAR, all_bits);
  13187. write_csr(dd, CCE_INT_MASK, mask);
  13188. return 0;
  13189. err_exit:
  13190. write_csr(dd, CCE_INT_MASK, mask);
  13191. dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
  13192. return -EINVAL;
  13193. }
  13194. /**
 * hfi1_init_dd - Allocate and initialize the device structure for the hfi.
 * @pdev: the pci_dev for hfi1_ib device
  13197. * @ent: pci_device_id struct for this dev
  13198. *
  13199. * Also allocates, initializes, and returns the devdata struct for this
  13200. * device instance
  13201. *
  13202. * This is global, and is called directly at init to set up the
  13203. * chip-specific function pointers for later use.
  13204. */
  13205. struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
  13206. const struct pci_device_id *ent)
  13207. {
  13208. struct hfi1_devdata *dd;
  13209. struct hfi1_pportdata *ppd;
  13210. u64 reg;
  13211. int i, ret;
  13212. static const char * const inames[] = { /* implementation names */
  13213. "RTL silicon",
  13214. "RTL VCS simulation",
  13215. "RTL FPGA emulation",
  13216. "Functional simulator"
  13217. };
  13218. struct pci_dev *parent = pdev->bus->self;
  13219. u32 sdma_engines;
  13220. dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
  13221. sizeof(struct hfi1_pportdata));
  13222. if (IS_ERR(dd))
  13223. goto bail;
  13224. sdma_engines = chip_sdma_engines(dd);
  13225. ppd = dd->pport;
  13226. for (i = 0; i < dd->num_pports; i++, ppd++) {
  13227. int vl;
  13228. /* init common fields */
  13229. hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
  13230. /* DC supports 4 link widths */
  13231. ppd->link_width_supported =
  13232. OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
  13233. OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
  13234. ppd->link_width_downgrade_supported =
  13235. ppd->link_width_supported;
  13236. /* start out enabling only 4X */
  13237. ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
  13238. ppd->link_width_downgrade_enabled =
  13239. ppd->link_width_downgrade_supported;
  13240. /* link width active is 0 when link is down */
  13241. /* link width downgrade active is 0 when link is down */
  13242. if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
  13243. num_vls > HFI1_MAX_VLS_SUPPORTED) {
  13244. dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
  13245. num_vls, HFI1_MAX_VLS_SUPPORTED);
  13246. num_vls = HFI1_MAX_VLS_SUPPORTED;
  13247. }
  13248. ppd->vls_supported = num_vls;
  13249. ppd->vls_operational = ppd->vls_supported;
  13250. /* Set the default MTU. */
  13251. for (vl = 0; vl < num_vls; vl++)
  13252. dd->vld[vl].mtu = hfi1_max_mtu;
  13253. dd->vld[15].mtu = MAX_MAD_PACKET;
  13254. /*
  13255. * Set the initial values to reasonable default, will be set
  13256. * for real when link is up.
  13257. */
  13258. ppd->overrun_threshold = 0x4;
  13259. ppd->phy_error_threshold = 0xf;
  13260. ppd->port_crc_mode_enabled = link_crc_mask;
  13261. /* initialize supported LTP CRC mode */
  13262. ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
  13263. /* initialize enabled LTP CRC mode */
  13264. ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
  13265. /* start in offline */
  13266. ppd->host_link_state = HLS_DN_OFFLINE;
  13267. init_vl_arb_caches(ppd);
  13268. }
  13269. /*
  13270. * Do remaining PCIe setup and save PCIe values in dd.
  13271. * Any error printing is already done by the init code.
  13272. * On return, we have the chip mapped.
  13273. */
  13274. ret = hfi1_pcie_ddinit(dd, pdev);
  13275. if (ret < 0)
  13276. goto bail_free;
  13277. /* Save PCI space registers to rewrite after device reset */
  13278. ret = save_pci_variables(dd);
  13279. if (ret < 0)
  13280. goto bail_cleanup;
  13281. dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
  13282. & CCE_REVISION_CHIP_REV_MAJOR_MASK;
  13283. dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
  13284. & CCE_REVISION_CHIP_REV_MINOR_MASK;
  13285. /*
  13286. * Check interrupt registers mapping if the driver has no access to
  13287. * the upstream component. In this case, it is likely that the driver
  13288. * is running in a VM.
  13289. */
  13290. if (!parent) {
  13291. ret = check_int_registers(dd);
  13292. if (ret)
  13293. goto bail_cleanup;
  13294. }
  13295. /*
  13296. * obtain the hardware ID - NOT related to unit, which is a
  13297. * software enumeration
  13298. */
  13299. reg = read_csr(dd, CCE_REVISION2);
  13300. dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
  13301. & CCE_REVISION2_HFI_ID_MASK;
  13302. /* the variable size will remove unwanted bits */
  13303. dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
  13304. dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
  13305. dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
  13306. dd->icode < ARRAY_SIZE(inames) ?
  13307. inames[dd->icode] : "unknown", (int)dd->irev);
  13308. /* speeds the hardware can support */
  13309. dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
  13310. /* speeds allowed to run at */
  13311. dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
  13312. /* give a reasonable active value, will be set on link up */
  13313. dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
  13314. /* fix up link widths for emulation _p */
  13315. ppd = dd->pport;
  13316. if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
  13317. ppd->link_width_supported =
  13318. ppd->link_width_enabled =
  13319. ppd->link_width_downgrade_supported =
  13320. ppd->link_width_downgrade_enabled =
  13321. OPA_LINK_WIDTH_1X;
  13322. }
	/* ensure num_vls isn't larger than number of sdma engines */
  13324. if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
  13325. dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
  13326. num_vls, sdma_engines);
  13327. num_vls = sdma_engines;
  13328. ppd->vls_supported = sdma_engines;
  13329. ppd->vls_operational = ppd->vls_supported;
  13330. }
  13331. /*
  13332. * Convert the ns parameter to the 64 * cclocks used in the CSR.
  13333. * Limit the max if larger than the field holds. If timeout is
  13334. * non-zero, then the calculated field will be at least 1.
  13335. *
  13336. * Must be after icode is set up - the cclock rate depends
  13337. * on knowing the hardware being used.
  13338. */
  13339. dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
  13340. if (dd->rcv_intr_timeout_csr >
  13341. RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
  13342. dd->rcv_intr_timeout_csr =
  13343. RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
  13344. else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
  13345. dd->rcv_intr_timeout_csr = 1;
  13346. /* needs to be done before we look for the peer device */
  13347. read_guid(dd);
  13348. /* set up shared ASIC data with peer device */
  13349. ret = init_asic_data(dd);
  13350. if (ret)
  13351. goto bail_cleanup;
  13352. /* obtain chip sizes, reset chip CSRs */
  13353. ret = init_chip(dd);
  13354. if (ret)
  13355. goto bail_cleanup;
  13356. /* read in the PCIe link speed information */
  13357. ret = pcie_speeds(dd);
  13358. if (ret)
  13359. goto bail_cleanup;
  13360. /* call before get_platform_config(), after init_chip_resources() */
  13361. ret = eprom_init(dd);
  13362. if (ret)
  13363. goto bail_free_rcverr;
  13364. /* Needs to be called before hfi1_firmware_init */
  13365. get_platform_config(dd);
  13366. /* read in firmware */
  13367. ret = hfi1_firmware_init(dd);
  13368. if (ret)
  13369. goto bail_cleanup;
  13370. /*
  13371. * In general, the PCIe Gen3 transition must occur after the
  13372. * chip has been idled (so it won't initiate any PCIe transactions
  13373. * e.g. an interrupt) and before the driver changes any registers
  13374. * (the transition will reset the registers).
  13375. *
  13376. * In particular, place this call after:
  13377. * - init_chip() - the chip will not initiate any PCIe transactions
  13378. * - pcie_speeds() - reads the current link speed
  13379. * - hfi1_firmware_init() - the needed firmware is ready to be
  13380. * downloaded
  13381. */
  13382. ret = do_pcie_gen3_transition(dd);
  13383. if (ret)
  13384. goto bail_cleanup;
  13385. /* start setting dd values and adjusting CSRs */
  13386. init_early_variables(dd);
  13387. parse_platform_config(dd);
  13388. ret = obtain_boardname(dd);
  13389. if (ret)
  13390. goto bail_cleanup;
  13391. snprintf(dd->boardversion, BOARD_VERS_MAX,
  13392. "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
  13393. HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
  13394. (u32)dd->majrev,
  13395. (u32)dd->minrev,
  13396. (dd->revision >> CCE_REVISION_SW_SHIFT)
  13397. & CCE_REVISION_SW_MASK);
  13398. ret = set_up_context_variables(dd);
  13399. if (ret)
  13400. goto bail_cleanup;
  13401. /* set initial RXE CSRs */
  13402. ret = init_rxe(dd);
  13403. if (ret)
  13404. goto bail_cleanup;
  13405. /* set initial TXE CSRs */
  13406. init_txe(dd);
  13407. /* set initial non-RXE, non-TXE CSRs */
  13408. init_other(dd);
  13409. /* set up KDETH QP prefix in both RX and TX CSRs */
  13410. init_kdeth_qp(dd);
  13411. ret = hfi1_dev_affinity_init(dd);
  13412. if (ret)
  13413. goto bail_cleanup;
  13414. /* send contexts must be set up before receive contexts */
  13415. ret = init_send_contexts(dd);
  13416. if (ret)
  13417. goto bail_cleanup;
  13418. ret = hfi1_create_kctxts(dd);
  13419. if (ret)
  13420. goto bail_cleanup;
  13421. /*
  13422. * Initialize aspm, to be done after gen3 transition and setting up
  13423. * contexts and before enabling interrupts
  13424. */
  13425. aspm_init(dd);
  13426. ret = init_pervl_scs(dd);
  13427. if (ret)
  13428. goto bail_cleanup;
  13429. /* sdma init */
  13430. for (i = 0; i < dd->num_pports; ++i) {
  13431. ret = sdma_init(dd, i);
  13432. if (ret)
  13433. goto bail_cleanup;
  13434. }
  13435. /* use contexts created by hfi1_create_kctxts */
  13436. ret = set_up_interrupts(dd);
  13437. if (ret)
  13438. goto bail_cleanup;
  13439. ret = hfi1_comp_vectors_set_up(dd);
  13440. if (ret)
  13441. goto bail_clear_intr;
  13442. /* set up LCB access - must be after set_up_interrupts() */
  13443. init_lcb_access(dd);
  13444. /*
  13445. * Serial number is created from the base guid:
  13446. * [27:24] = base guid [38:35]
  13447. * [23: 0] = base guid [23: 0]
  13448. */
  13449. snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
  13450. (dd->base_guid & 0xFFFFFF) |
  13451. ((dd->base_guid >> 11) & 0xF000000));
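	/*
	 * Note (added for clarity): the >> 11 above moves base guid bits
	 * [38:35] down to bits [27:24], and the 0xF000000 mask keeps exactly
	 * those four bits, matching the layout described in the comment
	 * above.
	 */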
  13452. dd->oui1 = dd->base_guid >> 56 & 0xFF;
  13453. dd->oui2 = dd->base_guid >> 48 & 0xFF;
  13454. dd->oui3 = dd->base_guid >> 40 & 0xFF;
  13455. ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
  13456. if (ret)
  13457. goto bail_clear_intr;
  13458. thermal_init(dd);
  13459. ret = init_cntrs(dd);
  13460. if (ret)
  13461. goto bail_clear_intr;
  13462. ret = init_rcverr(dd);
  13463. if (ret)
  13464. goto bail_free_cntrs;
  13465. init_completion(&dd->user_comp);
	/* The user refcount starts with one to indicate an active device */
  13467. atomic_set(&dd->user_refcount, 1);
  13468. goto bail;
  13469. bail_free_rcverr:
  13470. free_rcverr(dd);
  13471. bail_free_cntrs:
  13472. free_cntrs(dd);
  13473. bail_clear_intr:
  13474. hfi1_comp_vectors_clean_up(dd);
  13475. hfi1_clean_up_interrupts(dd);
  13476. bail_cleanup:
  13477. hfi1_pcie_ddcleanup(dd);
  13478. bail_free:
  13479. hfi1_free_devdata(dd);
  13480. dd = ERR_PTR(ret);
  13481. bail:
  13482. return dd;
  13483. }
  13484. static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
  13485. u32 dw_len)
  13486. {
  13487. u32 delta_cycles;
  13488. u32 current_egress_rate = ppd->current_egress_rate;
  13489. /* rates here are in units of 10^6 bits/sec */
  13490. if (desired_egress_rate == -1)
  13491. return 0; /* shouldn't happen */
  13492. if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help it go faster, only slower */
  13494. delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
  13495. egress_cycles(dw_len * 4, current_egress_rate);
  13496. return (u16)delta_cycles;
  13497. }
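/*
 * Worked example (illustrative, numbers assumed): for a 4096-byte packet
 * (dw_len = 1024) on a port currently egressing faster than the requested
 * static rate, the extra delay is egress_cycles(4096, desired_rate) -
 * egress_cycles(4096, current_rate); create_pbc() places that count in the
 * PBC static rate control field.
 */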
  13498. /**
  13499. * create_pbc - build a pbc for transmission
  13500. * @flags: special case flags or-ed in built pbc
  13501. * @srate: static rate
  13502. * @vl: vl
  13503. * @dwlen: dword length (header words + data words + pbc words)
  13504. *
  13505. * Create a PBC with the given flags, rate, VL, and length.
  13506. *
  13507. * NOTE: The PBC created will not insert any HCRC - all callers but one are
  13508. * for verbs, which does not use this PSM feature. The lone other caller
  13509. * is for the diagnostic interface which calls this if the user does not
  13510. * supply their own PBC.
  13511. */
  13512. u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
  13513. u32 dw_len)
  13514. {
  13515. u64 pbc, delay = 0;
  13516. if (unlikely(srate_mbs))
  13517. delay = delay_cycles(ppd, srate_mbs, dw_len);
  13518. pbc = flags
  13519. | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
  13520. | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
  13521. | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
  13522. | (dw_len & PBC_LENGTH_DWS_MASK)
  13523. << PBC_LENGTH_DWS_SHIFT;
  13524. return pbc;
  13525. }
  13526. #define SBUS_THERMAL 0x4f
  13527. #define SBUS_THERM_MONITOR_MODE 0x1
  13528. #define THERM_FAILURE(dev, ret, reason) \
  13529. dd_dev_err((dd), \
  13530. "Thermal sensor initialization failed: %s (%d)\n", \
  13531. (reason), (ret))
  13532. /*
  13533. * Initialize the thermal sensor.
  13534. *
  13535. * After initialization, enable polling of thermal sensor through
 * SBus interface. For this to work, the SBus Master
 * firmware has to be loaded because the HW polling
  13538. * logic uses SBus interrupts, which are not supported with
  13539. * default firmware. Otherwise, no data will be returned through
  13540. * the ASIC_STS_THERM CSR.
  13541. */
  13542. static int thermal_init(struct hfi1_devdata *dd)
  13543. {
  13544. int ret = 0;
  13545. if (dd->icode != ICODE_RTL_SILICON ||
  13546. check_chip_resource(dd, CR_THERM_INIT, NULL))
  13547. return ret;
  13548. ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
  13549. if (ret) {
  13550. THERM_FAILURE(dd, ret, "Acquire SBus");
  13551. return ret;
  13552. }
  13553. dd_dev_info(dd, "Initializing thermal sensor\n");
  13554. /* Disable polling of thermal readings */
  13555. write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
  13556. msleep(100);
  13557. /* Thermal Sensor Initialization */
  13558. /* Step 1: Reset the Thermal SBus Receiver */
  13559. ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
  13560. RESET_SBUS_RECEIVER, 0);
  13561. if (ret) {
  13562. THERM_FAILURE(dd, ret, "Bus Reset");
  13563. goto done;
  13564. }
  13565. /* Step 2: Set Reset bit in Thermal block */
  13566. ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
  13567. WRITE_SBUS_RECEIVER, 0x1);
  13568. if (ret) {
  13569. THERM_FAILURE(dd, ret, "Therm Block Reset");
  13570. goto done;
  13571. }
  13572. /* Step 3: Write clock divider value (100MHz -> 2MHz) */
  13573. ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
  13574. WRITE_SBUS_RECEIVER, 0x32);
  13575. if (ret) {
  13576. THERM_FAILURE(dd, ret, "Write Clock Div");
  13577. goto done;
  13578. }
  13579. /* Step 4: Select temperature mode */
  13580. ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
  13581. WRITE_SBUS_RECEIVER,
  13582. SBUS_THERM_MONITOR_MODE);
  13583. if (ret) {
  13584. THERM_FAILURE(dd, ret, "Write Mode Sel");
  13585. goto done;
  13586. }
  13587. /* Step 5: De-assert block reset and start conversion */
  13588. ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
  13589. WRITE_SBUS_RECEIVER, 0x2);
  13590. if (ret) {
  13591. THERM_FAILURE(dd, ret, "Write Reset Deassert");
  13592. goto done;
  13593. }
  13594. /* Step 5.1: Wait for first conversion (21.5ms per spec) */
  13595. msleep(22);
  13596. /* Enable polling of thermal readings */
  13597. write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
  13598. /* Set initialized flag */
  13599. ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
  13600. if (ret)
  13601. THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
  13602. done:
  13603. release_chip_resource(dd, CR_SBUS);
  13604. return ret;
  13605. }
  13606. static void handle_temp_err(struct hfi1_devdata *dd)
  13607. {
  13608. struct hfi1_pportdata *ppd = &dd->pport[0];
  13609. /*
  13610. * Thermal Critical Interrupt
  13611. * Put the device into forced freeze mode, take link down to
  13612. * offline, and put DC into reset.
  13613. */
  13614. dd_dev_emerg(dd,
  13615. "Critical temperature reached! Forcing device into freeze mode!\n");
  13616. dd->flags |= HFI1_FORCED_FREEZE;
  13617. start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
  13618. /*
  13619. * Shut DC down as much and as quickly as possible.
  13620. *
  13621. * Step 1: Take the link down to OFFLINE. This will cause the
  13622. * 8051 to put the Serdes in reset. However, we don't want to
  13623. * go through the entire link state machine since we want to
 * shut down ASAP. Furthermore, this is not a graceful shutdown
  13625. * but rather an attempt to save the chip.
  13626. * Code below is almost the same as quiet_serdes() but avoids
  13627. * all the extra work and the sleeps.
  13628. */
  13629. ppd->driver_link_ready = 0;
  13630. ppd->link_enabled = 0;
  13631. set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
  13632. PLS_OFFLINE);
  13633. /*
  13634. * Step 2: Shutdown LCB and 8051
  13635. * After shutdown, do not restore DC_CFG_RESET value.
  13636. */
  13637. dc_shutdown(dd);
  13638. }