/*	$OpenBSD: pf.c,v 1.935 2015/07/21 02:32:04 sashan Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2013 Henning Brauer <henning@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */
#include "bpfilter.h"
#include "pflog.h"
#include "pfsync.h"
#include "pflow.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/syslog.h>

#include <crypto/sha2.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_divert.h>

#include <net/pfvar.h>

#if NPFLOG > 0
#include <net/if_pflog.h>
#endif	/* NPFLOG > 0 */

#if NPFLOW > 0
#include <net/if_pflow.h>
#endif	/* NPFLOW > 0 */

#if NPFSYNC > 0
#include <net/if_pfsync.h>
#endif	/* NPFSYNC > 0 */

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_divert.h>
#endif	/* INET6 */
  89. /*
  90. * Global variables
  91. */
  92. struct pf_state_tree pf_statetbl;
  93. struct pf_queuehead pf_queues[2];
  94. struct pf_queuehead *pf_queues_active;
  95. struct pf_queuehead *pf_queues_inactive;
  96. struct pf_status pf_status;
  97. SHA2_CTX pf_tcp_secret_ctx;
  98. u_char pf_tcp_secret[16];
  99. int pf_tcp_secret_init;
  100. int pf_tcp_iss_off;
  101. struct pf_anchor_stackframe {
  102. struct pf_ruleset *rs;
  103. struct pf_rule *r;
  104. struct pf_anchor_node *parent;
  105. struct pf_anchor *child;
  106. } pf_anchor_stack[64];
  107. /*
  108. * Cannot fold into pf_pdesc directly, unknown storage size outside pf.c.
  109. * Keep in sync with union pf_headers in pflog_bpfcopy() in if_pflog.c.
  110. */
  111. union pf_headers {
  112. struct tcphdr tcp;
  113. struct udphdr udp;
  114. struct icmp icmp;
  115. #ifdef INET6
  116. struct icmp6_hdr icmp6;
  117. struct mld_hdr mld;
  118. struct nd_neighbor_solicit nd_ns;
  119. #endif /* INET6 */
  120. };
  121. struct pool pf_src_tree_pl, pf_rule_pl, pf_queue_pl;
  122. struct pool pf_state_pl, pf_state_key_pl, pf_state_item_pl;
  123. struct pool pf_rule_item_pl, pf_sn_item_pl;
  124. void pf_init_threshold(struct pf_threshold *, u_int32_t,
  125. u_int32_t);
  126. void pf_add_threshold(struct pf_threshold *);
  127. int pf_check_threshold(struct pf_threshold *);
  128. void pf_change_ap(struct pf_pdesc *, struct pf_addr *,
  129. u_int16_t *, struct pf_addr *, u_int16_t,
  130. sa_family_t);
  131. int pf_modulate_sack(struct pf_pdesc *,
  132. struct pf_state_peer *);
  133. void pf_change_a6(struct pf_pdesc *, struct pf_addr *a,
  134. struct pf_addr *an);
  135. int pf_icmp_mapping(struct pf_pdesc *, u_int8_t, int *,
  136. u_int16_t *, u_int16_t *);
  137. void pf_change_icmp(struct pf_pdesc *, struct pf_addr *,
  138. u_int16_t *, struct pf_addr *, struct pf_addr *,
  139. u_int16_t, sa_family_t);
  140. int pf_change_icmp_af(struct mbuf *, int,
  141. struct pf_pdesc *, struct pf_pdesc *,
  142. struct pf_addr *, struct pf_addr *, sa_family_t,
  143. sa_family_t);
  144. int pf_translate_icmp_af(int, void *);
  145. void pf_send_tcp(const struct pf_rule *, sa_family_t,
  146. const struct pf_addr *, const struct pf_addr *,
  147. u_int16_t, u_int16_t, u_int32_t, u_int32_t,
  148. u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
  149. u_int16_t, u_int);
  150. void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
  151. sa_family_t, struct pf_rule *, u_int);
  152. void pf_detach_state(struct pf_state *);
  153. void pf_state_key_detach(struct pf_state *, int);
  154. u_int32_t pf_tcp_iss(struct pf_pdesc *);
  155. void pf_rule_to_actions(struct pf_rule *,
  156. struct pf_rule_actions *);
  157. int pf_test_rule(struct pf_pdesc *, struct pf_rule **,
  158. struct pf_state **, struct pf_rule **,
  159. struct pf_ruleset **);
  160. static __inline int pf_create_state(struct pf_pdesc *, struct pf_rule *,
  161. struct pf_rule *, struct pf_rule *,
  162. struct pf_state_key **, struct pf_state_key **,
  163. int *, struct pf_state **, int,
  164. struct pf_rule_slist *, struct pf_rule_actions *,
  165. struct pf_src_node *[]);
  166. static __inline int pf_state_key_addr_setup(struct pf_pdesc *, void *,
  167. int, struct pf_addr *, int, struct pf_addr *,
  168. int, int);
  169. int pf_state_key_setup(struct pf_pdesc *, struct
  170. pf_state_key **, struct pf_state_key **, int);
  171. int pf_tcp_track_full(struct pf_pdesc *,
  172. struct pf_state_peer *, struct pf_state_peer *,
  173. struct pf_state **, u_short *, int *);
  174. int pf_tcp_track_sloppy(struct pf_pdesc *,
  175. struct pf_state_peer *, struct pf_state_peer *,
  176. struct pf_state **, u_short *);
  177. static __inline int pf_synproxy(struct pf_pdesc *, struct pf_state **,
  178. u_short *);
  179. int pf_test_state(struct pf_pdesc *, struct pf_state **,
  180. u_short *);
  181. int pf_icmp_state_lookup(struct pf_pdesc *,
  182. struct pf_state_key_cmp *, struct pf_state **,
  183. u_int16_t, u_int16_t, int, int *, int, int);
  184. int pf_test_state_icmp(struct pf_pdesc *,
  185. struct pf_state **, u_short *);
  186. u_int8_t pf_get_wscale(struct pf_pdesc *);
  187. u_int16_t pf_get_mss(struct pf_pdesc *);
  188. u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t, int,
  189. u_int16_t);
  190. static __inline int pf_set_rt_ifp(struct pf_state *, struct pf_addr *,
  191. sa_family_t);
  192. struct pf_divert *pf_get_divert(struct mbuf *);
  193. int pf_walk_option6(struct pf_pdesc *, struct ip6_hdr *,
  194. int, int, u_short *);
  195. int pf_walk_header6(struct pf_pdesc *, struct ip6_hdr *,
  196. u_short *);
  197. void pf_print_state_parts(struct pf_state *,
  198. struct pf_state_key *, struct pf_state_key *);
  199. int pf_addr_wrap_neq(struct pf_addr_wrap *,
  200. struct pf_addr_wrap *);
  201. int pf_compare_state_keys(struct pf_state_key *,
  202. struct pf_state_key *, struct pfi_kif *, u_int);
  203. struct pf_state *pf_find_state(struct pfi_kif *,
  204. struct pf_state_key_cmp *, u_int, struct mbuf *);
  205. int pf_src_connlimit(struct pf_state **);
  206. int pf_match_rcvif(struct mbuf *, struct pf_rule *);
  207. void pf_step_into_anchor(int *, struct pf_ruleset **,
  208. struct pf_rule **, struct pf_rule **);
  209. int pf_step_out_of_anchor(int *, struct pf_ruleset **,
  210. struct pf_rule **, struct pf_rule **,
  211. int *);
  212. void pf_counters_inc(int, struct pf_pdesc *,
  213. struct pf_state *, struct pf_rule *,
  214. struct pf_rule *);
  215. #if NPFLOG > 0
  216. void pf_log_matches(struct pf_pdesc *, struct pf_rule *,
  217. struct pf_rule *, struct pf_ruleset *,
  218. struct pf_rule_slist *);
  219. #endif /* NPFLOG > 0 */
  220. extern struct pool pfr_ktable_pl;
  221. extern struct pool pfr_kentry_pl;
/* Per-pool allocation limits, indexed by the PF_LIMIT_* constants. */
struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ &pf_state_pl,		PFSTATE_HIWAT,		PFSTATE_HIWAT },
	{ &pf_src_tree_pl,	PFSNODE_HIWAT,		PFSNODE_HIWAT },
	{ &pf_frent_pl,		PFFRAG_FRENT_HIWAT,	PFFRAG_FRENT_HIWAT },
	{ &pfr_ktable_pl,	PFR_KTABLE_HIWAT,	PFR_KTABLE_HIWAT },
	{ &pfr_kentry_pl,	PFR_KENTRY_HIWAT,	PFR_KENTRY_HIWAT }
};
/*
 * Look up the state for key k on interface i in direction d for mbuf m.
 * Drops the packet when no state is found or the state is marked for
 * purge.  For outbound packets matching a route-to/reply-to rule whose
 * route interface (rt_kif) differs from i, it returns PF_PASS —
 * presumably so the packet is not filtered again on the interface it
 * will be re-routed through (TODO confirm against callers).
 */
#define STATE_LOOKUP(i, k, d, s, m)					\
	do {								\
		s = pf_find_state(i, k, d, m);				\
		if (s == NULL || (s)->timeout == PFTM_PURGE)		\
			return (PF_DROP);				\
		if (d == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != i)					\
			return (PF_PASS);				\
	} while (0)

/* Interface a new state binds to: the kif for if-bound rules, else pfi_all. */
#define BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all

/*
 * Bump state counters for a freshly created state: on the matching
 * rule, its anchor (if any), and every rule on the match-rule list.
 */
#define STATE_INC_COUNTERS(s)					\
	do {							\
		struct pf_rule_item *mrm;			\
		s->rule.ptr->states_cur++;			\
		s->rule.ptr->states_tot++;			\
		if (s->anchor.ptr != NULL) {			\
			s->anchor.ptr->states_cur++;		\
			s->anchor.ptr->states_tot++;		\
		}						\
		SLIST_FOREACH(mrm, &s->match_rules, entry)	\
			mrm->r->states_cur++;			\
	} while (0)
  257. static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
  258. static __inline int pf_state_compare_key(struct pf_state_key *,
  259. struct pf_state_key *);
  260. static __inline int pf_state_compare_id(struct pf_state *,
  261. struct pf_state *);
  262. struct pf_src_tree tree_src_tracking;
  263. struct pf_state_tree_id tree_id;
  264. struct pf_state_queue state_list;
  265. RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
  266. RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
  267. RB_GENERATE(pf_state_tree_id, pf_state,
  268. entry_id, pf_state_compare_id);
  269. __inline int
  270. pf_addr_compare(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
  271. {
  272. switch (af) {
  273. case AF_INET:
  274. if (a->addr32[0] > b->addr32[0])
  275. return (1);
  276. if (a->addr32[0] < b->addr32[0])
  277. return (-1);
  278. break;
  279. #ifdef INET6
  280. case AF_INET6:
  281. if (a->addr32[3] > b->addr32[3])
  282. return (1);
  283. if (a->addr32[3] < b->addr32[3])
  284. return (-1);
  285. if (a->addr32[2] > b->addr32[2])
  286. return (1);
  287. if (a->addr32[2] < b->addr32[2])
  288. return (-1);
  289. if (a->addr32[1] > b->addr32[1])
  290. return (1);
  291. if (a->addr32[1] < b->addr32[1])
  292. return (-1);
  293. if (a->addr32[0] > b->addr32[0])
  294. return (1);
  295. if (a->addr32[0] < b->addr32[0])
  296. return (-1);
  297. break;
  298. #endif /* INET6 */
  299. }
  300. return (0);
  301. }
  302. static __inline int
  303. pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
  304. {
  305. int diff;
  306. if (a->rule.ptr > b->rule.ptr)
  307. return (1);
  308. if (a->rule.ptr < b->rule.ptr)
  309. return (-1);
  310. if ((diff = a->type - b->type) != 0)
  311. return (diff);
  312. if ((diff = a->af - b->af) != 0)
  313. return (diff);
  314. if ((diff = pf_addr_compare(&a->addr, &b->addr, a->af)) != 0)
  315. return (diff);
  316. return (0);
  317. }
  318. void
  319. pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
  320. {
  321. switch (af) {
  322. case AF_INET:
  323. dst->addr32[0] = src->addr32[0];
  324. break;
  325. #ifdef INET6
  326. case AF_INET6:
  327. dst->addr32[0] = src->addr32[0];
  328. dst->addr32[1] = src->addr32[1];
  329. dst->addr32[2] = src->addr32[2];
  330. dst->addr32[3] = src->addr32[3];
  331. break;
  332. #endif /* INET6 */
  333. default:
  334. unhandled_af(af);
  335. }
  336. }
  337. void
  338. pf_init_threshold(struct pf_threshold *threshold,
  339. u_int32_t limit, u_int32_t seconds)
  340. {
  341. threshold->limit = limit * PF_THRESHOLD_MULT;
  342. threshold->seconds = seconds;
  343. threshold->count = 0;
  344. threshold->last = time_uptime;
  345. }
  346. void
  347. pf_add_threshold(struct pf_threshold *threshold)
  348. {
  349. u_int32_t t = time_uptime, diff = t - threshold->last;
  350. if (diff >= threshold->seconds)
  351. threshold->count = 0;
  352. else
  353. threshold->count -= threshold->count * diff /
  354. threshold->seconds;
  355. threshold->count += PF_THRESHOLD_MULT;
  356. threshold->last = t;
  357. }
  358. int
  359. pf_check_threshold(struct pf_threshold *threshold)
  360. {
  361. return (threshold->count > threshold->limit);
  362. }
/*
 * Enforce per-source connection limits (max-src-conn and
 * max-src-conn-rate) for the connection tracked by *state.  Returns 0
 * when within limits; otherwise marks the state for purge and returns
 * 1.  On overflow the source address may also be inserted into the
 * rule's overload table and, when the rule requests it, existing states
 * from that source are flushed.
 */
int
pf_src_connlimit(struct pf_state **state)
{
	int			 bad = 0;
	struct pf_src_node	*sn;

	/* Without a source node there is nothing to count against. */
	if ((sn = pf_get_src_node((*state), PF_SN_NONE)) == NULL)
		return (0);

	sn->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&sn->conn_rate);

	/* Too many simultaneous connections from this source? */
	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn < sn->conn) {
		pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	/* Connection rate above the configured threshold? */
	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&sn->conn_rate)) {
		pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad)
		return (0);

	if ((*state)->rule.ptr->overload_tbl) {
		struct pfr_addr p;
		u_int32_t	killed = 0;

		pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
		if (pf_status.debug >= LOG_NOTICE) {
			log(LOG_NOTICE,
			    "pf: pf_src_connlimit: blocking address ");
			pf_print_host(&sn->addr, 0,
			    (*state)->key[PF_SK_WIRE]->af);
		}

		bzero(&p, sizeof(p));
		p.pfra_af = (*state)->key[PF_SK_WIRE]->af;
		switch ((*state)->key[PF_SK_WIRE]->af) {
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = sn->addr.v4;
			break;
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = sn->addr.v6;
			break;
#endif /* INET6 */
		}

		pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
		    &p, time_second);

		/* kill existing states if that's required. */
		if ((*state)->rule.ptr->flush) {
			struct pf_state_key *sk;
			struct pf_state *st;

			pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
			RB_FOREACH(st, pf_state_tree_id, &tree_id) {
				sk = st->key[PF_SK_WIRE];
				/*
				 * Kill states from this source. (Only those
				 * from the same rule if PF_FLUSH_GLOBAL is not
				 * set)
				 */
				if (sk->af ==
				    (*state)->key[PF_SK_WIRE]->af &&
				    (((*state)->direction == PF_OUT &&
				    PF_AEQ(&sn->addr, &sk->addr[1], sk->af)) ||
				    ((*state)->direction == PF_IN &&
				    PF_AEQ(&sn->addr, &sk->addr[0], sk->af))) &&
				    ((*state)->rule.ptr->flush &
				    PF_FLUSH_GLOBAL ||
				    (*state)->rule.ptr == st->rule.ptr)) {
					/* Marked; purge thread reclaims it. */
					st->timeout = PFTM_PURGE;
					st->src.state = st->dst.state =
					    TCPS_CLOSED;
					killed++;
				}
			}
			if (pf_status.debug >= LOG_NOTICE)
				addlog(", %u states killed", killed);
		}
		if (pf_status.debug >= LOG_NOTICE)
			addlog("\n");
	}

	/* kill this state */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
	return (1);
}
/*
 * Find or create the source node of the given type for (rule, af, src).
 * Global nodes are keyed on a NULL rule pointer.  On success *sn points
 * at the node and 0 is returned; returns -1 when a configured limit is
 * hit or allocation/insertion fails.
 */
int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    enum pf_sn_types type, sa_family_t af, struct pf_addr *src,
    struct pf_addr *raddr, int global)
{
	struct pf_src_node	k;

	if (*sn == NULL) {
		/* Look for an existing node first. */
		k.af = af;
		k.type = type;
		PF_ACPY(&k.addr, src, af);
		if (global)
			k.rule.ptr = NULL;
		else
			k.rule.ptr = rule;
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
	}
	if (*sn == NULL) {
		/* Allocate, unless the rule's node limit is reached. */
		if (!rule->max_src_nodes ||
		    rule->src_nodes < rule->max_src_nodes)
			(*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
		else
			pf_status.lcounters[LCNT_SRCNODES]++;
		if ((*sn) == NULL)
			return (-1);

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->type = type;
		(*sn)->af = af;
		if (global)
			(*sn)->rule.ptr = NULL;
		else
			(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		if (raddr)
			PF_ACPY(&(*sn)->raddr, raddr, af);
		if (RB_INSERT(pf_src_tree,
		    &tree_src_tracking, *sn) != NULL) {
			/* Should not happen: the key was searched above. */
			if (pf_status.debug >= LOG_NOTICE) {
				log(LOG_NOTICE,
				    "pf: src_tree insert failed: ");
				pf_print_host(&(*sn)->addr, 0, af);
				addlog("\n");
			}
			pool_put(&pf_src_tree_pl, *sn);
			return (-1);
		}
		(*sn)->creation = time_uptime;
		if ((*sn)->rule.ptr != NULL)
			(*sn)->rule.ptr->src_nodes++;
		pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
		pf_status.src_nodes++;
	} else {
		/* Existing node: honor the per-source state limit. */
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			pf_status.lcounters[LCNT_SRCSTATES]++;
			return (-1);
		}
	}
	return (0);
}
/*
 * Free an expired source node with no states attached; nodes still
 * referenced or not yet expired are left alone.
 *
 * NOTE(review): the RB_REMOVE/pool_put are inside the rule.ptr != NULL
 * branch, so global nodes (rule.ptr == NULL, see pf_insert_src_node())
 * are never removed here — confirm this is intended and not a leak.
 */
void
pf_remove_src_node(struct pf_src_node *sn)
{
	if (sn->states > 0 || sn->expire > time_uptime)
		return;

	if (sn->rule.ptr != NULL) {
		sn->rule.ptr->src_nodes--;
		/* Dropping the last reference may free the rule too. */
		if (sn->rule.ptr->states_cur == 0 &&
		    sn->rule.ptr->src_nodes == 0)
			pf_rm_rule(NULL, sn->rule.ptr);
		RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
		pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
		pf_status.src_nodes--;
		pool_put(&pf_src_tree_pl, sn);
	}
}
  527. struct pf_src_node *
  528. pf_get_src_node(struct pf_state *s, enum pf_sn_types type)
  529. {
  530. struct pf_sn_item *sni;
  531. SLIST_FOREACH(sni, &s->src_nodes, next)
  532. if (sni->sn->type == type)
  533. return (sni->sn);
  534. return (NULL);
  535. }
/*
 * Unlink source node sn from state s: remove every pf_sn_item on the
 * state's list that points at sn and drop the node's state refcount.
 */
void
pf_state_rm_src_node(struct pf_state *s, struct pf_src_node *sn)
{
	struct pf_sn_item	*sni, *snin, *snip = NULL;

	/* Manual walk so entries can be unlinked while iterating. */
	for (sni = SLIST_FIRST(&s->src_nodes); sni; sni = snin) {
		snin = SLIST_NEXT(sni, next);
		if (sni->sn == sn) {
			if (snip)
				SLIST_REMOVE_AFTER(snip, next);
			else
				SLIST_REMOVE_HEAD(&s->src_nodes, next);
			pool_put(&pf_sn_item_pl, sni);
			sni = NULL;
			sn->states--;
		}
		/* Advance the trailing pointer only past surviving items. */
		if (sni != NULL)
			snip = sni;
	}
}
  555. /* state table stuff */
  556. static __inline int
  557. pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
  558. {
  559. int diff;
  560. if ((diff = a->proto - b->proto) != 0)
  561. return (diff);
  562. if ((diff = a->af - b->af) != 0)
  563. return (diff);
  564. if ((diff = pf_addr_compare(&a->addr[0], &b->addr[0], a->af)) != 0)
  565. return (diff);
  566. if ((diff = pf_addr_compare(&a->addr[1], &b->addr[1], a->af)) != 0)
  567. return (diff);
  568. if ((diff = a->port[0] - b->port[0]) != 0)
  569. return (diff);
  570. if ((diff = a->port[1] - b->port[1]) != 0)
  571. return (diff);
  572. if ((diff = a->rdomain - b->rdomain) != 0)
  573. return (diff);
  574. return (0);
  575. }
  576. static __inline int
  577. pf_state_compare_id(struct pf_state *a, struct pf_state *b)
  578. {
  579. if (a->id > b->id)
  580. return (1);
  581. if (a->id < b->id)
  582. return (-1);
  583. if (a->creatorid > b->creatorid)
  584. return (1);
  585. if (a->creatorid < b->creatorid)
  586. return (-1);
  587. return (0);
  588. }
/*
 * Insert key sk into the global state table and attach it to state s at
 * slot idx (PF_SK_WIRE or PF_SK_STACK).  If an identical key already
 * exists, s is linked to the existing key and sk is freed.  A
 * conflicting state on the same interface is either recycled (when its
 * TCP connection is already past FIN_WAIT_2 on both sides) or causes
 * the attach to fail with -1.
 */
int
pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx)
{
	struct pf_state_item	*si;
	struct pf_state_key	*cur;
	struct pf_state		*olds = NULL;

	KASSERT(s->key[idx] == NULL);
	if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) {
		/* key exists. check for same kif, if none, add to key */
		TAILQ_FOREACH(si, &cur->states, entry)
			if (si->s->kif == s->kif &&
			    ((si->s->key[PF_SK_WIRE]->af == sk->af &&
			    si->s->direction == s->direction) ||
			    (si->s->key[PF_SK_WIRE]->af !=
			    si->s->key[PF_SK_STACK]->af &&
			    sk->af == si->s->key[PF_SK_STACK]->af &&
			    si->s->direction != s->direction))) {
				if (sk->proto == IPPROTO_TCP &&
				    si->s->src.state >= TCPS_FIN_WAIT_2 &&
				    si->s->dst.state >= TCPS_FIN_WAIT_2) {
					si->s->src.state = si->s->dst.state =
					    TCPS_CLOSED;
					/* unlink late or sks can go away */
					olds = si->s;
				} else {
					if (pf_status.debug >= LOG_NOTICE) {
						log(LOG_NOTICE,
						    "pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						addlog(", existing: ");
						pf_print_state_parts(si->s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						addlog("\n");
					}
					pool_put(&pf_state_key_pl, sk);
					return (-1);	/* collision! */
				}
			}
		/* Share the pre-existing key; our copy is redundant. */
		pool_put(&pf_state_key_pl, sk);
		s->key[idx] = cur;
	} else
		s->key[idx] = sk;

	if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) {
		pf_state_key_detach(s, idx);
		return (-1);
	}
	si->s = s;

	/* list is sorted, if-bound states before floating */
	if (s->kif == pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states, si, entry);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states, si, entry);

	/* Recycled state can be unlinked now that s holds the key. */
	if (olds)
		pf_unlink_state(olds);

	return (0);
}
/*
 * Detach both state keys from state s.  When wire and stack share one
 * key, the wire slot is cleared first so the shared key is detached
 * (and possibly freed) exactly once.
 */
void
pf_detach_state(struct pf_state *s)
{
	if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK])
		s->key[PF_SK_WIRE] = NULL;

	if (s->key[PF_SK_STACK] != NULL)
		pf_state_key_detach(s, PF_SK_STACK);

	if (s->key[PF_SK_WIRE] != NULL)
		pf_state_key_detach(s, PF_SK_WIRE);
}
/*
 * Unlink state s from its key at slot idx.  When no state references
 * the key any more it is removed from the state table and freed, after
 * clearing the back-pointers from a linked reverse key or inpcb.
 */
void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_item	*si;

	if (s->key[idx] == NULL)
		return;

	/* Find this state's item on the key's state list. */
	si = TAILQ_FIRST(&s->key[idx]->states);
	while (si && si->s != s)
		si = TAILQ_NEXT(si, entry);

	if (si) {
		TAILQ_REMOVE(&s->key[idx]->states, si, entry);
		pool_put(&pf_state_item_pl, si);
	}

	if (TAILQ_EMPTY(&s->key[idx]->states)) {
		RB_REMOVE(pf_state_tree, &pf_statetbl, s->key[idx]);
		if (s->key[idx]->reverse)
			s->key[idx]->reverse->reverse = NULL;
		if (s->key[idx]->inp)
			s->key[idx]->inp->inp_pf_sk = NULL;
		pool_put(&pf_state_key_pl, s->key[idx]);
	}
	s->key[idx] = NULL;
}
  689. struct pf_state_key *
  690. pf_alloc_state_key(int pool_flags)
  691. {
  692. struct pf_state_key *sk;
  693. if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL)
  694. return (NULL);
  695. TAILQ_INIT(&sk->states);
  696. return (sk);
  697. }
/*
 * Fill in the source/destination addresses of the state key at *arg for
 * the packet described by pd.  ICMPv6 neighbor discovery is special-
 * cased: the key is built from the ND target address, and a multicast
 * destination (solicited-node) is wildcarded to zero.  Returns -1 when
 * a "multi" key makes no sense for the ICMP type, 0 otherwise.
 */
static __inline int
pf_state_key_addr_setup(struct pf_pdesc *pd, void *arg, int sidx,
    struct pf_addr *saddr, int didx, struct pf_addr *daddr, int af, int multi)
{
	struct pf_state_key_cmp *key = arg;
#ifdef INET6
	struct nd_neighbor_solicit *nd;
	struct pf_addr *target;

	if (af == AF_INET || pd->proto != IPPROTO_ICMPV6)
		goto copy;

	switch (pd->hdr.icmp6->icmp6_type) {
	case ND_NEIGHBOR_SOLICIT:
		if (multi)
			return (-1);
		nd = (void *)pd->hdr.icmp6;
		target = (struct pf_addr *)&nd->nd_ns_target;
		daddr = target;
		break;
	case ND_NEIGHBOR_ADVERT:
		if (multi)
			return (-1);
		/*
		 * NOTE(review): the advert header is read through the
		 * solicit struct; assumes both target fields share the
		 * same offset — confirm against netinet6 headers.
		 */
		nd = (void *)pd->hdr.icmp6;
		target = (struct pf_addr *)&nd->nd_ns_target;
		saddr = target;
		if (IN6_IS_ADDR_MULTICAST(&pd->dst->v6)) {
			key->addr[didx].addr32[0] = 0;
			key->addr[didx].addr32[1] = 0;
			key->addr[didx].addr32[2] = 0;
			key->addr[didx].addr32[3] = 0;
			daddr = NULL; /* overwritten */
		}
		break;
	default:
		if (multi) {
			/* Wildcard source: link-local scope multicast
			 * (presumably ff02::1, per the macro names). */
			key->addr[sidx].addr32[0] = __IPV6_ADDR_INT32_MLL;
			key->addr[sidx].addr32[1] = 0;
			key->addr[sidx].addr32[2] = 0;
			key->addr[sidx].addr32[3] = __IPV6_ADDR_INT32_ONE;
			saddr = NULL; /* overwritten */
		}
	}
 copy:
#endif /* INET6 */
	if (saddr)
		PF_ACPY(&key->addr[sidx], saddr, af);
	if (daddr)
		PF_ACPY(&key->addr[didx], daddr, af);

	return (0);
}
/*
 * Build the wire- and stack-side state keys for the packet described by
 * pd.  Without translation a single key is shared by both sides;
 * NAT/NAT64 gets a second key carrying the translated addresses, ports,
 * routing domain and (for NAT64) the other address family.  Returns 0
 * on success or ENOMEM, freeing any allocated keys itself.
 */
int
pf_state_key_setup(struct pf_pdesc *pd, struct pf_state_key **skw,
    struct pf_state_key **sks, int rtableid)
{
	/* if returning error we MUST pool_put state keys ourselves */
	struct pf_state_key *sk1, *sk2;
	u_int wrdom = pd->rdomain;
	int afto = pd->af != pd->naf;

	if ((sk1 = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
		return (ENOMEM);

	pf_state_key_addr_setup(pd, sk1, pd->sidx, pd->src, pd->didx, pd->dst,
	    pd->af, 0);
	sk1->port[pd->sidx] = pd->osport;
	sk1->port[pd->didx] = pd->odport;
	sk1->proto = pd->proto;
	sk1->af = pd->af;
	sk1->rdomain = pd->rdomain;
	if (rtableid >= 0)
		wrdom = rtable_l2(rtableid);

	if (PF_ANEQ(&pd->nsaddr, pd->src, pd->af) ||
	    PF_ANEQ(&pd->ndaddr, pd->dst, pd->af) ||
	    pd->nsport != pd->osport || pd->ndport != pd->odport ||
	    wrdom != pd->rdomain || afto) {	/* NAT/NAT64 */
		if ((sk2 = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL) {
			pool_put(&pf_state_key_pl, sk1);
			return (ENOMEM);
		}
		/* An af translation swaps the src/dst indexes. */
		pf_state_key_addr_setup(pd, sk2, afto ? pd->didx : pd->sidx,
		    &pd->nsaddr, afto ? pd->sidx : pd->didx, &pd->ndaddr,
		    pd->naf, 0);
		sk2->port[afto ? pd->didx : pd->sidx] = pd->nsport;
		sk2->port[afto ? pd->sidx : pd->didx] = pd->ndport;
		if (afto) {
			/* Family translation also swaps the ICMP proto. */
			switch (pd->proto) {
			case IPPROTO_ICMP:
				sk2->proto = IPPROTO_ICMPV6;
				break;
			case IPPROTO_ICMPV6:
				sk2->proto = IPPROTO_ICMP;
				break;
			default:
				sk2->proto = pd->proto;
			}
		} else
			sk2->proto = pd->proto;
		sk2->af = pd->naf;
		sk2->rdomain = wrdom;
	} else
		sk2 = sk1;

	/* The wire key faces the direction the packet arrived from. */
	if (pd->dir == PF_IN) {
		*skw = sk1;
		*sks = sk2;
	} else {
		*sks = sk1;
		*skw = sk2;
	}

	if (pf_status.debug >= LOG_DEBUG) {
		log(LOG_DEBUG, "pf: key setup: ");
		pf_print_state_parts(NULL, *skw, *sks);
		addlog("\n");
	}

	return (0);
}
/*
 * Attach the wire/stack keys to state s, assign it an id if the caller
 * did not provide one, and insert it into the id tree and the global
 * state list.  *skw/*sks are updated to point at the (possibly shared,
 * pre-existing) attached keys.  Returns 0 on success, -1 on failure
 * with the keys freed/detached.
 */
int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key **skw,
    struct pf_state_key **sks, struct pf_state *s)
{
	splsoftassert(IPL_SOFTNET);

	s->kif = kif;
	if (*skw == *sks) {
		/* No translation: one key serves both sides. */
		if (pf_state_key_attach(*skw, s, PF_SK_WIRE))
			return (-1);
		*skw = *sks = s->key[PF_SK_WIRE];
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
	} else {
		if (pf_state_key_attach(*skw, s, PF_SK_WIRE)) {
			pool_put(&pf_state_key_pl, *sks);
			return (-1);
		}
		*skw = s->key[PF_SK_WIRE];
		/* Roll back the wire attach if the stack attach fails. */
		if (pf_state_key_attach(*sks, s, PF_SK_STACK)) {
			pf_state_key_detach(s, PF_SK_WIRE);
			return (-1);
		}
		*sks = s->key[PF_SK_STACK];
	}

	/* Assign a fresh id only when the caller left both fields zero. */
	if (s->id == 0 && s->creatorid == 0) {
		s->id = htobe64(pf_status.stateid++);
		s->creatorid = pf_status.hostid;
	}
	if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
		if (pf_status.debug >= LOG_NOTICE) {
			log(LOG_NOTICE, "pf: state insert failed: "
			    "id: %016llx creatorid: %08x",
			    betoh64(s->id), ntohl(s->creatorid));
			addlog("\n");
		}
		pf_detach_state(s);
		return (-1);
	}
	TAILQ_INSERT_TAIL(&state_list, s, entry_list);
	pf_status.fcounters[FCNT_STATE_INSERT]++;
	pf_status.states++;
	pfi_kif_ref(kif, PFI_KIF_REF_STATE);
#if NPFSYNC > 0
	pfsync_insert_state(s);
#endif	/* NPFSYNC > 0 */
	return (0);
}
  856. struct pf_state *
  857. pf_find_state_byid(struct pf_state_cmp *key)
  858. {
  859. pf_status.fcounters[FCNT_STATE_SEARCH]++;
  860. return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
  861. }
/*
 * Verify that key a (cached from a packet header) and key b (freshly
 * looked up) are exact mirror images of each other, i.e. describe the
 * two directions of the same connection.  Returns 0 on a match, -1
 * (after logging the discrepancy) otherwise.
 */
int
pf_compare_state_keys(struct pf_state_key *a, struct pf_state_key *b,
    struct pfi_kif *kif, u_int dir)
{
	/* a (from hdr) and b (new) must be exact opposites of each other */
	if (a->af == b->af && a->proto == b->proto &&
	    PF_AEQ(&a->addr[0], &b->addr[1], a->af) &&
	    PF_AEQ(&a->addr[1], &b->addr[0], a->af) &&
	    a->port[0] == b->port[1] &&
	    a->port[1] == b->port[0] && a->rdomain == b->rdomain)
		return (0);
	else {
		/* mismatch. must not happen. */
		if (pf_status.debug >= LOG_ERR) {
			log(LOG_ERR,
			    "pf: state key linking mismatch! dir=%s, "
			    "if=%s, stored af=%u, a0: ",
			    dir == PF_OUT ? "OUT" : "IN",
			    kif->pfik_name, a->af);
			pf_print_host(&a->addr[0], a->port[0], a->af);
			addlog(", a1: ");
			pf_print_host(&a->addr[1], a->port[1], a->af);
			addlog(", proto=%u", a->proto);
			addlog(", found af=%u, a0: ", b->af);
			pf_print_host(&b->addr[0], b->port[0], b->af);
			addlog(", a1: ");
			pf_print_host(&b->addr[1], b->port[1], b->af);
			addlog(", proto=%u", b->proto);
			addlog("\n");
		}
		return (-1);
	}
}
/*
 * Look up the state for a packet.  Outbound packets may carry cached
 * key/socket pointers in the mbuf header, used as a fast path; the slow
 * path searches the global state table and caches the reverse-key and
 * inpcb links it discovers for later packets.  Returns the first state
 * on the key whose interface binding and direction fit, or NULL.
 */
struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir,
    struct mbuf *m)
{
	struct pf_state_key	*sk;
	struct pf_state_item	*si;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;
	if (pf_status.debug >= LOG_DEBUG) {
		log(LOG_DEBUG, "pf: key search, if=%s: ", kif->pfik_name);
		pf_print_state_parts(NULL, (struct pf_state_key *)key, NULL);
		addlog("\n");
	}

	if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
	    m->m_pkthdr.pf.statekey->reverse)
		sk = m->m_pkthdr.pf.statekey->reverse;
	else if (dir == PF_OUT && m->m_pkthdr.pf.inp &&
	    m->m_pkthdr.pf.inp->inp_pf_sk)
		sk = m->m_pkthdr.pf.inp->inp_pf_sk;
	else {
		if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
		    (struct pf_state_key *)key)) == NULL)
			return (NULL);
		/* Cache the links just discovered for future lookups. */
		if (dir == PF_OUT && m->m_pkthdr.pf.statekey &&
		    pf_compare_state_keys(m->m_pkthdr.pf.statekey, sk,
		    kif, dir) == 0) {
			m->m_pkthdr.pf.statekey->reverse = sk;
			sk->reverse = m->m_pkthdr.pf.statekey;
		} else if (dir == PF_OUT && m->m_pkthdr.pf.inp && !sk->inp) {
			m->m_pkthdr.pf.inp->inp_pf_sk = sk;
			sk->inp = m->m_pkthdr.pf.inp;
		}
	}

	if (dir == PF_OUT) {
		/* The cached header pointers have been consumed. */
		m->m_pkthdr.pf.statekey = NULL;
		m->m_pkthdr.pf.inp = NULL;
	}

	/* list is sorted, if-bound states before floating ones */
	TAILQ_FOREACH(si, &sk->states, entry)
		if ((si->s->kif == pfi_all || si->s->kif == kif) &&
		    ((si->s->key[PF_SK_WIRE]->af == si->s->key[PF_SK_STACK]->af
		    && sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
		    si->s->key[PF_SK_STACK])) ||
		    (si->s->key[PF_SK_WIRE]->af != si->s->key[PF_SK_STACK]->af
		    && dir == PF_IN && (sk == si->s->key[PF_SK_STACK] ||
		    sk == si->s->key[PF_SK_WIRE]))))
			return (si->s);

	return (NULL);
}
  943. struct pf_state *
  944. pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
  945. {
  946. struct pf_state_key *sk;
  947. struct pf_state_item *si, *ret = NULL;
  948. pf_status.fcounters[FCNT_STATE_SEARCH]++;
  949. sk = RB_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);
  950. if (sk != NULL) {
  951. TAILQ_FOREACH(si, &sk->states, entry)
  952. if (dir == PF_INOUT ||
  953. (sk == (dir == PF_IN ? si->s->key[PF_SK_WIRE] :
  954. si->s->key[PF_SK_STACK]))) {
  955. if (more == NULL)
  956. return (si->s);
  957. if (ret)
  958. (*more)++;
  959. else
  960. ret = si;
  961. }
  962. }
  963. return (ret ? ret->s : NULL);
  964. }
/*
 * Serialize state st into the export format sp (struct pfsync_state):
 * multi-byte fields are converted to network byte order, and the
 * creation/expiry times become relative seconds instead of absolute
 * uptime values.
 */
void
pf_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	int32_t expire;

	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_WIRE].rdomain = htons(st->key[PF_SK_WIRE]->rdomain);
	sp->key[PF_SK_WIRE].af = st->key[PF_SK_WIRE]->af;
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->key[PF_SK_STACK].rdomain = htons(st->key[PF_SK_STACK]->rdomain);
	sp->key[PF_SK_STACK].af = st->key[PF_SK_STACK]->af;
	sp->rtableid[PF_SK_WIRE] = htonl(st->rtableid[PF_SK_WIRE]);
	sp->rtableid[PF_SK_STACK] = htonl(st->rtableid[PF_SK_STACK]);
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	memcpy(&sp->rt_addr, &st->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	expire = pf_state_expires(st);
	if (expire <= time_uptime)
		sp->expire = htonl(0);	/* already expired */
	else
		sp->expire = htonl(expire - time_uptime);
	sp->direction = st->direction;
#if NPFLOG > 0
	sp->log = st->log;
#endif	/* NPFLOG > 0 */
	sp->timeout = st->timeout;
	sp->state_flags = htons(st->state_flags);
	if (!SLIST_EMPTY(&st->src_nodes))
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	sp->nat_rule = htonl(-1);	/* left for compat, nat_rule is gone */
	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
	sp->max_mss = htons(st->max_mss);
	sp->min_ttl = st->min_ttl;
	sp->set_tos = st->set_tos;
	sp->set_prio[0] = st->set_prio[0];
	sp->set_prio[1] = st->set_prio[1];
}
  1027. /* END state table stuff */
/*
 * Purge kernel thread: wakes once per second to expire a fraction of
 * the state table, and every PFTM_INTERVAL seconds also purges expired
 * fragments and source nodes.  All purge work runs at splsoftnet.
 */
void
pf_purge_thread(void *v)
{
	int nloops = 0, s;

	for (;;) {
		tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);

		s = splsoftnet();

		/* process a fraction of the state table every second */
		pf_purge_expired_states(1 + (pf_status.states
		    / pf_default_rule.timeout[PFTM_INTERVAL]));

		/* purge other expired types every PFTM_INTERVAL seconds */
		if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes(0);
			nloops = 0;
		}

		splx(s);
	}
}
  1047. int32_t
  1048. pf_state_expires(const struct pf_state *state)
  1049. {
  1050. int32_t timeout;
  1051. u_int32_t start;
  1052. u_int32_t end;
  1053. u_int32_t states;
  1054. /* handle all PFTM_* > PFTM_MAX here */
  1055. if (state->timeout == PFTM_PURGE)
  1056. return (0);
  1057. KASSERT(state->timeout != PFTM_UNLINKED);
  1058. KASSERT(state->timeout < PFTM_MAX);
  1059. timeout = state->rule.ptr->timeout[state->timeout];
  1060. if (!timeout)
  1061. timeout = pf_default_rule.timeout[state->timeout];
  1062. start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
  1063. if (start) {
  1064. end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
  1065. states = state->rule.ptr->states_cur;
  1066. } else {
  1067. start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
  1068. end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
  1069. states = pf_status.states;
  1070. }
  1071. if (end && states > start && start < end) {
  1072. if (states >= end)
  1073. return (0);
  1074. timeout = timeout * (end - states) / (end - start);
  1075. }
  1076. return (state->expire + timeout);
  1077. }
/*
 * Reap source-tracking nodes that hold no states and whose expiry time
 * has passed.  If the caller does not already hold the consistency
 * lock (waslocked == 0), it is acquired lazily on the first removal
 * and released before returning.
 */
void
pf_purge_expired_src_nodes(int waslocked)
{
	struct pf_src_node	*cur, *next;
	int			 locked = waslocked;

	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
		/* remember the successor: cur may be removed below */
		next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);

		if (cur->states == 0 && cur->expire <= time_uptime) {
			if (! locked) {
				rw_enter_write(&pf_consistency_lock);
				/*
				 * Acquiring the lock may block; re-fetch
				 * the successor in case the tree changed
				 * in the meantime.
				 */
				next = RB_NEXT(pf_src_tree,
				    &tree_src_tracking, cur);
				locked = 1;
			}
			pf_remove_src_node(cur);
		}
	}

	/* drop the lock only if we took it ourselves */
	if (locked && !waslocked)
		rw_exit_write(&pf_consistency_lock);
}
/*
 * Detach state s from all of its source-tracking nodes, updating the
 * per-node connection and state counters.  A node whose state count
 * drops to zero is stamped with an expiry time so that
 * pf_purge_expired_src_nodes() can reap it later.
 */
void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t		 timeout;
	struct pf_sn_item	*sni;

	while ((sni = SLIST_FIRST(&s->src_nodes)) != NULL) {
		SLIST_REMOVE_HEAD(&s->src_nodes, next);
		/* established TCP states also count against the conn limit */
		if (s->src.tcp_est)
			--sni->sn->conn;
		if (--sni->sn->states == 0) {
			/* per-rule timeout, falling back to the default */
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    pf_default_rule.timeout[PFTM_SRC_NODE];
			sni->sn->expire = time_uptime + timeout;
		}
		pool_put(&pf_sn_item_pl, sni);
	}
}
/* callers should be at splsoftnet */
/*
 * Unlink a state from the state tables and mark it PFTM_UNLINKED so
 * that pf_purge_expired_states() will free it on a later pass.  The
 * state stays on the global state_list until pf_free_state() runs.
 */
void
pf_unlink_state(struct pf_state *cur)
{
	splsoftassert(IPL_SOFTNET);

	/* handle load balancing related tasks */
	pf_postprocess_addr(cur);

	/*
	 * If we were still proxying a TCP handshake toward the
	 * destination, tear the connection down with an RST|ACK on the
	 * wire side.
	 */
	if (cur->src.state == PF_TCPS_PROXY_DST) {
		pf_send_tcp(cur->rule.ptr, cur->key[PF_SK_WIRE]->af,
		    &cur->key[PF_SK_WIRE]->addr[1],
		    &cur->key[PF_SK_WIRE]->addr[0],
		    cur->key[PF_SK_WIRE]->port[1],
		    cur->key[PF_SK_WIRE]->port[0],
		    cur->src.seqhi, cur->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag,
		    cur->key[PF_SK_WIRE]->rdomain);
	}
	RB_REMOVE(pf_state_tree_id, &tree_id, cur);
#if NPFLOW > 0
	if (cur->state_flags & PFSTATE_PFLOW)
		export_pflow(cur);
#endif /* NPFLOW > 0 */
#if NPFSYNC > 0
	pfsync_delete_state(cur);
#endif /* NPFSYNC > 0 */
	/* mark for deferred freeing by the purge scan */
	cur->timeout = PFTM_UNLINKED;
	pf_src_tree_remove_state(cur);
	pf_detach_state(cur);
}
/* callers should be at splsoftnet and hold the
 * write_lock on pf_consistency_lock */
/*
 * Release a previously unlinked state: drop its references on the
 * matching rule, anchor, and match-rule list (removing any rule that
 * no longer has states or source nodes), detach it from the global
 * state list, and return it to the state pool.
 */
void
pf_free_state(struct pf_state *cur)
{
	struct pf_rule_item	*ri;

	splsoftassert(IPL_SOFTNET);

#if NPFSYNC > 0
	/* defer freeing while a pfsync peer still references the state */
	if (pfsync_state_in_use(cur))
		return;
#endif /* NPFSYNC > 0 */

	KASSERT(cur->timeout == PFTM_UNLINKED);

	/* drop rule references; unreferenced rules may be removed */
	if (--cur->rule.ptr->states_cur == 0 &&
	    cur->rule.ptr->src_nodes == 0)
		pf_rm_rule(NULL, cur->rule.ptr);
	if (cur->anchor.ptr != NULL)
		if (--cur->anchor.ptr->states_cur == 0)
			pf_rm_rule(NULL, cur->anchor.ptr);
	while ((ri = SLIST_FIRST(&cur->match_rules))) {
		SLIST_REMOVE_HEAD(&cur->match_rules, entry);
		if (--ri->r->states_cur == 0 &&
		    ri->r->src_nodes == 0)
			pf_rm_rule(NULL, ri->r);
		pool_put(&pf_rule_item_pl, ri);
	}
	pf_normalize_tcp_cleanup(cur);
	pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
	TAILQ_REMOVE(&state_list, cur, entry_list);
	if (cur->tag)
		pf_tag_unref(cur->tag);
	pool_put(&pf_state_pl, cur);
	pf_status.fcounters[FCNT_STATE_REMOVALS]++;
	pf_status.states--;
}
/*
 * Scan up to maxcheck entries of the global state list, freeing states
 * already marked PFTM_UNLINKED and unlinking+freeing expired ones.
 * A static cursor persists across calls so successive invocations walk
 * different slices of the list.  The consistency lock is acquired
 * lazily before the first free and released before returning.
 */
void
pf_purge_expired_states(u_int32_t maxcheck)
{
	static struct pf_state	*cur = NULL;	/* scan cursor, persists */
	struct pf_state		*next;
	int			 locked = 0;

	while (maxcheck--) {
		/* wrap to start of list when we hit the end */
		if (cur == NULL) {
			cur = TAILQ_FIRST(&state_list);
			if (cur == NULL)
				break;	/* list empty */
		}

		/* get next state, as cur may get deleted */
		next = TAILQ_NEXT(cur, entry_list);

		if (cur->timeout == PFTM_UNLINKED) {
			/* free unlinked state */
			if (! locked) {
				rw_enter_write(&pf_consistency_lock);
				locked = 1;
			}
			pf_free_state(cur);
		} else if (pf_state_expires(cur) <= time_uptime) {
			/* unlink and free expired state */
			pf_unlink_state(cur);
			if (! locked) {
				rw_enter_write(&pf_consistency_lock);
				locked = 1;
			}
			pf_free_state(cur);
		}
		cur = next;
	}

	if (locked)
		rw_exit_write(&pf_consistency_lock);
}
  1216. int
  1217. pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
  1218. {
  1219. if (aw->type != PF_ADDR_TABLE)
  1220. return (0);
  1221. if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname, 1)) == NULL)
  1222. return (1);
  1223. return (0);
  1224. }
  1225. void
  1226. pf_tbladdr_remove(struct pf_addr_wrap *aw)
  1227. {
  1228. if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
  1229. return;
  1230. pfr_detach_table(aw->p.tbl);
  1231. aw->p.tbl = NULL;
  1232. }
/*
 * Prepare a table address wrap for export to userland: replace the
 * kernel table pointer with the table's address count, or -1 if the
 * table is not active.
 */
void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable	*kt = aw->p.tbl;

	if (aw->type != PF_ADDR_TABLE || kt == NULL)
		return;
	/* for an inactive table, report its active root instead */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	/*
	 * NOTE(review): p.tbl and p.tblcnt presumably share storage in a
	 * union, so the pointer is cleared before the count is written —
	 * keep this write order.
	 */
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}
/*
 * Log an address (and optional port, both in network byte order) in
 * human-readable form.  IPv4 prints a dotted quad with ":port"; IPv6
 * prints hex groups with the longest run of zero groups compressed to
 * "::" and the port appended as "[port]".  Other families print nothing.
 */
void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		addlog("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			addlog(":%u", p);
		}
		break;
	}
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;

		/* 255 means "no zero run started / none found yet" */
		curstart = curend = maxstart = maxend = 255;
		/* find the longest run of all-zero 16-bit groups */
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		/* a zero run may extend to the final group */
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				/* groups inside the run collapse to "::" */
				if (i == 0)
					addlog(":");
				if (i == maxend)
					addlog(":");
			} else {
				b = ntohs(addr->addr16[i]);
				addlog("%x", b);
				if (i < 7)
					addlog(":");
			}
		}
		if (p) {
			p = ntohs(p);
			addlog("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}
/* Log a state using its own wire and stack state keys. */
void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}
/*
 * Log a state in human-readable form: protocol, direction, wire and
 * stack state keys, and (for s != NULL) per-peer TCP sequence tracking
 * info plus the matching rule number.  skwp/sksp may override the
 * state's own keys; any of the three arguments may be NULL.
 */
void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key	*skw, *sks;
	u_int8_t		 proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		addlog("IPv4");
		break;
	case IPPROTO_IPV6:
		addlog("IPv6");
		break;
	case IPPROTO_TCP:
		addlog("TCP");
		break;
	case IPPROTO_UDP:
		addlog("UDP");
		break;
	case IPPROTO_ICMP:
		addlog("ICMP");
		break;
	case IPPROTO_ICMPV6:
		addlog("ICMPv6");
		break;
	default:
		addlog("%u", proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		addlog(" in");
		break;
	case PF_OUT:
		addlog(" out");
		break;
	}
	if (skw) {
		addlog(" wire: (%d) ", skw->rdomain);
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		addlog(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		addlog(" stack: (%d) ", sks->rdomain);
		/* identical keys mean no translation; print "-" instead */
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			addlog(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			addlog("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			/* per-peer sequence-tracking windows */
			addlog(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				addlog(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			addlog("]");
			addlog(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				addlog(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			addlog("]");
		}
		addlog(" %u:%u", s->src.state, s->dst.state);
		if (s->rule.ptr)
			addlog(" @%d", s->rule.ptr->nr);
	}
}
  1389. void
  1390. pf_print_flags(u_int8_t f)
  1391. {
  1392. if (f)
  1393. addlog(" ");
  1394. if (f & TH_FIN)
  1395. addlog("F");
  1396. if (f & TH_SYN)
  1397. addlog("S");
  1398. if (f & TH_RST)
  1399. addlog("R");
  1400. if (f & TH_PUSH)
  1401. addlog("P");
  1402. if (f & TH_ACK)
  1403. addlog("A");
  1404. if (f & TH_URG)
  1405. addlog("U");
  1406. if (f & TH_ECE)
  1407. addlog("E");
  1408. if (f & TH_CWR)
  1409. addlog("W");
  1410. }
/*
 * Advance the per-criterion head pointer up to (but not including)
 * `cur', pointing every rule passed over at `cur' as its skip target
 * for criterion i.  Relies on `head' and `cur' being in the caller's
 * scope.
 */
#define PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

/*
 * Pre-compute the skip steps for a rule queue: for each match
 * criterion, each rule gets a pointer to the next rule that differs in
 * that criterion, letting the ruleset evaluator jump over runs of
 * rules that would fail the same test.
 */
void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		/* whenever a criterion changes, flush its pending run */
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->onrdomain != prev->onrdomain ||
		    cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_RDOM);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	/* terminate all remaining runs at the end of the queue (NULL) */
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
  1459. int
  1460. pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
  1461. {
  1462. if (aw1->type != aw2->type)
  1463. return (1);
  1464. switch (aw1->type) {
  1465. case PF_ADDR_ADDRMASK:
  1466. case PF_ADDR_RANGE:
  1467. if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
  1468. return (1);
  1469. if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
  1470. return (1);
  1471. return (0);
  1472. case PF_ADDR_DYNIFTL:
  1473. return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
  1474. case PF_ADDR_NONE:
  1475. case PF_ADDR_NOROUTE:
  1476. case PF_ADDR_URPFFAILED:
  1477. return (0);
  1478. case PF_ADDR_TABLE:
  1479. return (aw1->p.tbl != aw2->p.tbl);
  1480. case PF_ADDR_RTLABEL:
  1481. return (aw1->v.rtlabel != aw2->v.rtlabel);
  1482. default:
  1483. addlog("invalid address type: %d\n", aw1->type);
  1484. return (1);
  1485. }
  1486. }
/*
 * Rewrite an address/port pair during translation.  The protocol
 * checksum is verified lazily the first time the packet is modified.
 * The address is copied only when no address-family translation is in
 * progress (pd->af == naf); NOTE(review): the af-translated address is
 * presumably written by the af-translation path instead — verify.
 */
void
pf_change_ap(struct pf_pdesc *pd, struct pf_addr *a, u_int16_t *p,
    struct pf_addr *an, u_int16_t pn, sa_family_t naf)
{
	if (pd->csum_status == PF_CSUM_UNKNOWN)
		pf_check_proto_cksum(pd, pd->off, pd->tot_len - pd->off,
		    pd->proto, pd->af);
	if (pd->af == naf)
		PF_ACPY(a, an, naf);
	if (p != NULL)
		*p = pn;
}
/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
void
pf_change_a(struct pf_pdesc *pd, void *a, u_int32_t an)
{
	/* verify the checksum once (lazily) before modifying the packet */
	if (pd->csum_status == PF_CSUM_UNKNOWN)
		pf_check_proto_cksum(pd, pd->off, pd->tot_len - pd->off,
		    pd->proto, pd->af);
	/* memcpy tolerates an unaligned destination */
	memcpy(a, &an, sizeof(u_int32_t));
}
#ifdef INET6
/* Rewrite an IPv6 address, verifying the checksum lazily first. */
void
pf_change_a6(struct pf_pdesc *pd, struct pf_addr *a, struct pf_addr *an)
{
	if (pd->csum_status == PF_CSUM_UNKNOWN)
		pf_check_proto_cksum(pd, pd->off, pd->tot_len - pd->off,
		    pd->proto, pd->af);
	PF_ACPY(a, an, AF_INET6);
}
#endif /* INET6 */
/*
 * Classify an ICMP/ICMPv6 type for state matching.  Queries and their
 * replies are mapped onto a common (*virtual_type, *virtual_id) pair so
 * that a reply matches the state created by the query.  *icmp_dir is
 * PF_IN for the query side and PF_OUT for the reply side.  Returns 0
 * when the packet matches its own ICMP state, 1 when it is an error
 * message that matches the state of another connection.  *virtual_type
 * is returned in network byte order.
 */
int
pf_icmp_mapping(struct pf_pdesc *pd, u_int8_t type, int *icmp_dir,
    u_int16_t *virtual_id, u_int16_t *virtual_type)
{
	/*
	 * ICMP types marked with PF_OUT are typically responses to
	 * PF_IN, and will match states in the opposite direction.
	 * PF_IN ICMP types need to match a state with that type.
	 */
	*icmp_dir = PF_OUT;

	/* Queries (and responses) */
	switch (pd->af) {
	case AF_INET:
		switch (type) {
		case ICMP_ECHO:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_ECHOREPLY:
			*virtual_type = ICMP_ECHO;
			*virtual_id = pd->hdr.icmp->icmp_id;
			break;
		case ICMP_TSTAMP:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_TSTAMPREPLY:
			*virtual_type = ICMP_TSTAMP;
			*virtual_id = pd->hdr.icmp->icmp_id;
			break;
		case ICMP_IREQ:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_IREQREPLY:
			*virtual_type = ICMP_IREQ;
			*virtual_id = pd->hdr.icmp->icmp_id;
			break;
		case ICMP_MASKREQ:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_MASKREPLY:
			*virtual_type = ICMP_MASKREQ;
			*virtual_id = pd->hdr.icmp->icmp_id;
			break;
		case ICMP_IPV6_WHEREAREYOU:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_IPV6_IAMHERE:
			*virtual_type = ICMP_IPV6_WHEREAREYOU;
			*virtual_id = 0; /* Nothing sane to match on! */
			break;
		case ICMP_MOBILE_REGREQUEST:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_MOBILE_REGREPLY:
			*virtual_type = ICMP_MOBILE_REGREQUEST;
			*virtual_id = 0; /* Nothing sane to match on! */
			break;
		case ICMP_ROUTERSOLICIT:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_ROUTERADVERT:
			*virtual_type = ICMP_ROUTERSOLICIT;
			*virtual_id = 0; /* Nothing sane to match on! */
			break;
		/* These ICMP types map to other connections */
		case ICMP_UNREACH:
		case ICMP_SOURCEQUENCH:
		case ICMP_REDIRECT:
		case ICMP_TIMXCEED:
		case ICMP_PARAMPROB:
			/* These will not be used, but set them anyway */
			*icmp_dir = PF_IN;
			/* already byte-swapped: returns before final htons */
			*virtual_type = htons(type);
			*virtual_id = 0;
			return (1); /* These types match to another state */
		/*
		 * All remaining ICMP types get their own states,
		 * and will only match in one direction.
		 */
		default:
			*icmp_dir = PF_IN;
			*virtual_type = type;
			*virtual_id = 0;
			break;
		}
		break;
#ifdef INET6
	case AF_INET6:
		switch (type) {
		case ICMP6_ECHO_REQUEST:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP6_ECHO_REPLY:
			*virtual_type = ICMP6_ECHO_REQUEST;
			*virtual_id = pd->hdr.icmp6->icmp6_id;
			break;
		case MLD_LISTENER_QUERY:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case MLD_LISTENER_REPORT: {
			struct mld_hdr *mld = (void *)pd->hdr.icmp6;
			u_int32_t h;

			*virtual_type = MLD_LISTENER_QUERY;
			/* generate fake id for these messages */
			h = mld->mld_addr.s6_addr32[0] ^
			    mld->mld_addr.s6_addr32[1] ^
			    mld->mld_addr.s6_addr32[2] ^
			    mld->mld_addr.s6_addr32[3];
			*virtual_id = (h >> 16) ^ (h & 0xffff);
			break;
		}
		/*
		 * ICMP6_FQDN and ICMP6_NI query/reply are the same type as
		 * ICMP6_WRU
		 */
		case ICMP6_WRUREQUEST:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP6_WRUREPLY:
			*virtual_type = ICMP6_WRUREQUEST;
			*virtual_id = 0; /* Nothing sane to match on! */
			break;
		case MLD_MTRACE:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case MLD_MTRACE_RESP:
			*virtual_type = MLD_MTRACE;
			*virtual_id = 0; /* Nothing sane to match on! */
			break;
		case ND_NEIGHBOR_SOLICIT:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ND_NEIGHBOR_ADVERT: {
			struct nd_neighbor_solicit *nd = (void *)pd->hdr.icmp6;
			u_int32_t h;

			*virtual_type = ND_NEIGHBOR_SOLICIT;
			/* generate fake id for these messages */
			h = nd->nd_ns_target.s6_addr32[0] ^
			    nd->nd_ns_target.s6_addr32[1] ^
			    nd->nd_ns_target.s6_addr32[2] ^
			    nd->nd_ns_target.s6_addr32[3];
			*virtual_id = (h >> 16) ^ (h & 0xffff);
			break;
		}
		/*
		 * These ICMP types map to other connections.
		 * ND_REDIRECT can't be in this list because the triggering
		 * packet header is optional.
		 */
		case ICMP6_DST_UNREACH:
		case ICMP6_PACKET_TOO_BIG:
		case ICMP6_TIME_EXCEEDED:
		case ICMP6_PARAM_PROB:
			/* These will not be used, but set them anyway */
			*icmp_dir = PF_IN;
			/* already byte-swapped: returns before final htons */
			*virtual_type = htons(type);
			*virtual_id = 0;
			return (1); /* These types match to another state */
		/*
		 * All remaining ICMP6 types get their own states,
		 * and will only match in one direction.
		 */
		default:
			*icmp_dir = PF_IN;
			*virtual_type = type;
			*virtual_id = 0;
			break;
		}
		break;
#endif /* INET6 */
	}
	*virtual_type = htons(*virtual_type);
	return (0); /* These types match to their own state */
}
/*
 * Rewrite the addresses/port quoted inside an ICMP error message.
 * ia/ip are the inner (quoted) address and port; oa, when non-NULL, is
 * the outer header address; na/np are the new values to install.
 */
void
pf_change_icmp(struct pf_pdesc *pd, struct pf_addr *ia, u_int16_t *ip,
    struct pf_addr *oa, struct pf_addr *na, u_int16_t np, sa_family_t af)
{
	if (pd->csum_status == PF_CSUM_UNKNOWN)
		pf_check_proto_cksum(pd, pd->off, pd->tot_len - pd->off,
		    pd->proto, pd->af);

	/* Change inner protocol port */
	if (ip != NULL)
		*ip = np;

	/* Change inner ip address */
	PF_ACPY(ia, na, af);

	/* Outer ip address, fix outer icmpv6 checksum, if necessary. */
	if (oa)
		PF_ACPY(oa, na, af);
}
  1707. #if INET6
  1708. int
  1709. pf_translate_af(struct pf_pdesc *pd)
  1710. {
  1711. struct mbuf *mp;
  1712. struct ip *ip4;
  1713. struct ip6_hdr *ip6;
  1714. struct icmp6_hdr *icmp;
  1715. int hlen;
  1716. if (pd->csum_status == PF_CSUM_UNKNOWN)
  1717. pf_check_proto_cksum(pd, pd->off, pd->tot_len - pd->off,
  1718. pd->proto, pd->af);
  1719. hlen = pd->naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6);
  1720. /* trim the old header */
  1721. m_adj(pd->m, pd->off);
  1722. /* prepend a new one */
  1723. if ((M_PREPEND(pd->m, hlen, M_DONTWAIT)) == NULL)
  1724. return (-1);
  1725. switch (pd->naf) {
  1726. case AF_INET:
  1727. ip4 = mtod(pd->m, struct ip *);
  1728. bzero(ip4, hlen);
  1729. ip4->ip_v = IPVERSION;
  1730. ip4->ip_hl = hlen >> 2;
  1731. ip4->ip_tos = pd->tos;
  1732. ip4->ip_len = htons(hlen + (pd->tot_len - pd->off));
  1733. ip4->ip_id = htons(ip_randomid());
  1734. ip4->ip_off = htons(IP_DF);
  1735. ip4->ip_ttl = pd->ttl;
  1736. ip4->ip_p = pd->proto;
  1737. ip4->ip_src = pd->nsaddr.v4;
  1738. ip4->ip_dst = pd->ndaddr.v4;
  1739. break;
  1740. case AF_INET6:
  1741. ip6 = mtod(pd->m, struct ip6_hdr *);
  1742. bzero(ip6, hlen);
  1743. ip6->ip6_vfc = IPV6_VERSION;
  1744. ip6->ip6_flow |= htonl((u_int32_t)pd->tos << 20);
  1745. ip6->ip6_plen = htons(pd->tot_len - pd->off);
  1746. ip6->ip6_nxt = pd->proto;
  1747. if (!pd->ttl || pd->ttl > IPV6_DEFHLIM)
  1748. ip6->ip6_hlim = IPV6_DEFHLIM;
  1749. else
  1750. ip6->ip6_hlim = pd->ttl;
  1751. ip6->ip6_src = pd->nsaddr.v6;
  1752. ip6->ip6_dst = pd->ndaddr.v6;
  1753. break;
  1754. default:
  1755. return (-1);
  1756. }
  1757. /* recalculate icmp/icmp6 checksums */
  1758. if (pd->proto == IPPROTO_ICMP || pd->proto == IPPROTO_ICMPV6) {
  1759. int off;
  1760. if ((mp = m_pulldown(pd->m, hlen, sizeof(*icmp), &off)) ==
  1761. NULL) {
  1762. pd->m = NULL;
  1763. return (-1);
  1764. }
  1765. icmp = (struct icmp6_hdr *)(mp->m_data + off);
  1766. icmp->icmp6_cksum = 0;
  1767. icmp->icmp6_cksum = pd->naf == AF_INET ?
  1768. in4_cksum(pd->m, 0, hlen, ntohs(ip4->ip_len) - hlen) :
  1769. in6_cksum(pd->m, IPPROTO_ICMPV6, hlen,
  1770. ntohs(ip6->ip6_plen));
  1771. }
  1772. return (0);
  1773. }
/*
 * Translate the inner (quoted) IPv4/IPv6 header carried in an ICMP
 * error payload to the other address family.  The chain is split at
 * the inner header (offset `off'), the old inner header is replaced by
 * a freshly built one of family `naf' with addresses src/dst, and the
 * pieces are re-joined.  pd is the outer packet descriptor, pd2
 * describes the quoted packet.  Returns 0 on success, -1 on failure.
 */
int
pf_change_icmp_af(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct pf_pdesc *pd2, struct pf_addr *src, struct pf_addr *dst,
    sa_family_t af, sa_family_t naf)
{
	struct mbuf		*n = NULL;
	struct ip		*ip4;
	struct ip6_hdr		*ip6;
	int			 hlen, olen, mlen;

	if (pd->csum_status == PF_CSUM_UNKNOWN)
		pf_check_proto_cksum(pd, pd->off, pd->tot_len - pd->off,
		    pd->proto, pd->af);

	/* only IPv4 <-> IPv6 translations are supported */
	if (af == naf || (af != AF_INET && af != AF_INET6) ||
	    (naf != AF_INET && naf != AF_INET6))
		return (-1);

	/* split the mbuf chain on the inner ip/ip6 header boundary */
	if ((n = m_split(m, off, M_DONTWAIT)) == NULL)
		return (-1);

	/* old header */
	olen = pd2->off - off;
	/* new header */
	hlen = naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6);

	/* trim old header */
	m_adj(n, olen);

	/* prepend a new one; M_PREPEND frees n on failure */
	if ((M_PREPEND(n, hlen, M_DONTWAIT)) == NULL)
		return (-1);

	/* translate inner ip/ip6 header */
	switch (naf) {
	case AF_INET:
		ip4 = mtod(n, struct ip *);
		bzero(ip4, sizeof(*ip4));
		ip4->ip_v = IPVERSION;
		ip4->ip_hl = sizeof(*ip4) >> 2;
		ip4->ip_len = htons(sizeof(*ip4) + pd2->tot_len - olen);
		ip4->ip_id = htons(ip_randomid());
		ip4->ip_off = htons(IP_DF);
		ip4->ip_ttl = pd2->ttl;
		/* the quoted protocol changes family along with the header */
		if (pd2->proto == IPPROTO_ICMPV6)
			ip4->ip_p = IPPROTO_ICMP;
		else
			ip4->ip_p = pd2->proto;
		ip4->ip_src = src->v4;
		ip4->ip_dst = dst->v4;
		ip4->ip_sum = in_cksum(n, ip4->ip_hl << 2);
		break;
	case AF_INET6:
		ip6 = mtod(n, struct ip6_hdr *);
		bzero(ip6, sizeof(*ip6));
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(pd2->tot_len - olen);
		if (pd2->proto == IPPROTO_ICMP)
			ip6->ip6_nxt = IPPROTO_ICMPV6;
		else
			ip6->ip6_nxt = pd2->proto;
		/* clamp the hop limit to the IPv6 default */
		if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM)
			ip6->ip6_hlim = IPV6_DEFHLIM;
		else
			ip6->ip6_hlim = pd2->ttl;
		ip6->ip6_src = src->v6;
		ip6->ip6_dst = dst->v6;
		break;
	}

	/* adjust payload offset and total packet length */
	pd2->off += hlen - olen;
	pd->tot_len += hlen - olen;

	/* merge modified inner packet with the original header */
	mlen = n->m_pkthdr.len;
	m_cat(m, n);
	m->m_pkthdr.len += mlen;

	return (0);
}
/* byte offsets of fields within the IPv4 and IPv6 headers */
#define PTR_IP(field)	(offsetof(struct ip, field))
#define PTR_IP6(field)	(offsetof(struct ip6_hdr, field))

/*
 * Translate an ICMP header between ICMPv4 and ICMPv6 in place.
 * `af' is the TARGET family of the surrounding packet: AF_INET means
 * arg points at an icmp6_hdr to be rewritten into ICMPv4 fields,
 * AF_INET6 means arg points at an icmp to be rewritten into ICMPv6.
 * Types, codes, the nextmtu/mtu field, and the param-problem pointer
 * are all remapped.  Returns 0 on success, -1 for untranslatable
 * types/codes.
 */
int
pf_translate_icmp_af(int af, void *arg)
{
	struct icmp		*icmp4;
	struct icmp6_hdr	*icmp6;
	u_int32_t		 mtu;
	int32_t			 ptr = -1;
	u_int8_t		 type;
	u_int8_t		 code;

	switch (af) {
	case AF_INET:
		icmp6 = arg;
		type = icmp6->icmp6_type;
		code = icmp6->icmp6_code;
		mtu = ntohl(icmp6->icmp6_mtu);

		switch (type) {
		case ICMP6_ECHO_REQUEST:
			type = ICMP_ECHO;
			break;
		case ICMP6_ECHO_REPLY:
			type = ICMP_ECHOREPLY;
			break;
		case ICMP6_DST_UNREACH:
			type = ICMP_UNREACH;
			switch (code) {
			case ICMP6_DST_UNREACH_NOROUTE:
			case ICMP6_DST_UNREACH_BEYONDSCOPE:
			case ICMP6_DST_UNREACH_ADDR:
				code = ICMP_UNREACH_HOST;
				break;
			case ICMP6_DST_UNREACH_ADMIN:
				code = ICMP_UNREACH_HOST_PROHIB;
				break;
			case ICMP6_DST_UNREACH_NOPORT:
				code = ICMP_UNREACH_PORT;
				break;
			default:
				return (-1);
			}
			break;
		case ICMP6_PACKET_TOO_BIG:
			type = ICMP_UNREACH;
			code = ICMP_UNREACH_NEEDFRAG;
			/* IPv4 header is 20 bytes smaller than IPv6 */
			mtu -= 20;
			break;
		case ICMP6_TIME_EXCEEDED:
			type = ICMP_TIMXCEED;
			break;
		case ICMP6_PARAM_PROB:
			switch (code) {
			case ICMP6_PARAMPROB_HEADER:
				type = ICMP_PARAMPROB;
				code = ICMP_PARAMPROB_ERRATPTR;
				/* map the offending-field offset v6 -> v4 */
				ptr = ntohl(icmp6->icmp6_pptr);
				if (ptr == PTR_IP6(ip6_vfc))
					; /* preserve */
				else if (ptr == PTR_IP6(ip6_vfc) + 1)
					ptr = PTR_IP(ip_tos);
				else if (ptr == PTR_IP6(ip6_plen) ||
				    ptr == PTR_IP6(ip6_plen) + 1)
					ptr = PTR_IP(ip_len);
				else if (ptr == PTR_IP6(ip6_nxt))
					ptr = PTR_IP(ip_p);
				else if (ptr == PTR_IP6(ip6_hlim))
					ptr = PTR_IP(ip_ttl);
				else if (ptr >= PTR_IP6(ip6_src) &&
				    ptr < PTR_IP6(ip6_dst))
					ptr = PTR_IP(ip_src);
				else if (ptr >= PTR_IP6(ip6_dst) &&
				    ptr < sizeof(struct ip6_hdr))
					ptr = PTR_IP(ip_dst);
				else {
					return (-1);
				}
				break;
			case ICMP6_PARAMPROB_NEXTHEADER:
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_PROTOCOL;
				break;
			default:
				return (-1);
			}
			break;
		default:
			return (-1);
		}

		icmp6->icmp6_type = type;
		icmp6->icmp6_code = code;
		/* icmp6_mtu overlays the icmpv4 nextmtu field */
		icmp6->icmp6_mtu = htonl(mtu);
		/* the icmpv4 pointer field is the most significant byte only */
		if (ptr >= 0)
			icmp6->icmp6_pptr = htonl(ptr << 24);
		break;
	case AF_INET6:
		icmp4 = arg;
		type = icmp4->icmp_type;
		code = icmp4->icmp_code;
		mtu = ntohs(icmp4->icmp_nextmtu);

		switch (type) {
		case ICMP_ECHO:
			type = ICMP6_ECHO_REQUEST;
			break;
		case ICMP_ECHOREPLY:
			type = ICMP6_ECHO_REPLY;
			break;
		case ICMP_UNREACH:
			type = ICMP6_DST_UNREACH;
			switch (code) {
			case ICMP_UNREACH_NET:
			case ICMP_UNREACH_HOST:
			case ICMP_UNREACH_NET_UNKNOWN:
			case ICMP_UNREACH_HOST_UNKNOWN:
			case ICMP_UNREACH_ISOLATED:
			case ICMP_UNREACH_TOSNET:
			case ICMP_UNREACH_TOSHOST:
				code = ICMP6_DST_UNREACH_NOROUTE;
				break;
			case ICMP_UNREACH_PORT:
				code = ICMP6_DST_UNREACH_NOPORT;
				break;
			case ICMP_UNREACH_NET_PROHIB:
			case ICMP_UNREACH_HOST_PROHIB:
			case ICMP_UNREACH_FILTER_PROHIB:
			case ICMP_UNREACH_PRECEDENCE_CUTOFF:
				code = ICMP6_DST_UNREACH_ADMIN;
				break;
			case ICMP_UNREACH_PROTOCOL:
				type = ICMP6_PARAM_PROB;
				code = ICMP6_PARAMPROB_NEXTHEADER;
				ptr = offsetof(struct ip6_hdr, ip6_nxt);
				break;
			case ICMP_UNREACH_NEEDFRAG:
				type = ICMP6_PACKET_TOO_BIG;
				code = 0;
				/* IPv6 header is 20 bytes larger than IPv4 */
				mtu += 20;
				break;
			default:
				return (-1);
			}
			break;
		case ICMP_TIMXCEED:
			type = ICMP6_TIME_EXCEEDED;
			break;
		case ICMP_PARAMPROB:
			type = ICMP6_PARAM_PROB;
			switch (code) {
			case ICMP_PARAMPROB_ERRATPTR:
				code = ICMP6_PARAMPROB_HEADER;
				break;
			case ICMP_PARAMPROB_LENGTH:
				code = ICMP6_PARAMPROB_HEADER;
				break;
			default:
				return (-1);
			}

			/* map the offending-field offset v4 -> v6 */
			ptr = icmp4->icmp_pptr;
			if (ptr == 0 || ptr == PTR_IP(ip_tos))
				; /* preserve */
			else if (ptr == PTR_IP(ip_len) ||
			    ptr == PTR_IP(ip_len) + 1)
				ptr = PTR_IP6(ip6_plen);
			else if (ptr == PTR_IP(ip_ttl))
				ptr = PTR_IP6(ip6_hlim);
			else if (ptr == PTR_IP(ip_p))
				ptr = PTR_IP6(ip6_nxt);
			else if (ptr >= PTR_IP(ip_src) &&
			    ptr < PTR_IP(ip_dst))
				ptr = PTR_IP6(ip6_src);
			else if (ptr >= PTR_IP(ip_dst) &&
			    ptr < sizeof(struct ip))
				ptr = PTR_IP6(ip6_dst);
			else {
				return (-1);
			}
			break;
		default:
			return (-1);
		}

		icmp4->icmp_type = type;
		icmp4->icmp_code = code;
		icmp4->icmp_nextmtu = htons(mtu);
		if (ptr >= 0)
			icmp4->icmp_void = htonl(ptr);
		break;
	}

	return (0);
}
  2036. #endif /* INET6 */
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 *
 * Walks the TCP option list in a local copy of the options, subtracts
 * the peer's sequence modulator from every SACK block, and writes the
 * options back into the mbuf if anything changed.  Returns 1 when the
 * options were rewritten, 0 otherwise.
 */
int
pf_modulate_sack(struct pf_pdesc *pd, struct pf_state_peer *dst)
{
	struct tcphdr	*th = pd->hdr.tcp;
	int		 hlen = (th->th_off << 2) - sizeof(*th);
	int		 thoptlen = hlen;	/* full option length, kept for copyback */
	u_int8_t	 opts[MAX_TCPOPTLEN], *opt = opts;
	int		 copyback = 0, i, olen;
	struct sackblk	 sack;

#define TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	/* bail out if no room for a SACK option or the pullup fails */
	if (hlen < TCPOLEN_SACKLEN || hlen > MAX_TCPOPTLEN || !pf_pull_hdr(
	    pd->m, pd->off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return 0;

	/* walk the option list */
	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			/* single-byte options carry no length field */
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				/* rewrite each SACK block by seqdiff */
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_a(pd, &sack.start,
					    htonl(ntohl(sack.start) -
					    dst->seqdiff));
					pf_change_a(pd, &sack.end,
					    htonl(ntohl(sack.end) -
					    dst->seqdiff));
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = 1;
			}
			/* FALLTHROUGH */
		default:
			/* skip over any other option by its length byte */
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback)
		m_copyback(pd->m, pd->off + sizeof(*th), thoptlen, opts,
		    M_NOWAIT);
	return (copyback);
}
  2092. void
  2093. pf_send_tcp(const struct pf_rule *r, sa_family_t af,
  2094. const struct pf_addr *saddr, const struct pf_addr *daddr,
  2095. u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
  2096. u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
  2097. u_int16_t rtag, u_int rdom)
  2098. {
  2099. struct mbuf *m;
  2100. int len, tlen;
  2101. struct ip *h;
  2102. #ifdef INET6
  2103. struct ip6_hdr *h6;
  2104. #endif /* INET6 */
  2105. struct tcphdr *th;
  2106. char *opt;
  2107. /* maximum segment size tcp option */
  2108. tlen = sizeof(struct tcphdr);
  2109. if (mss)
  2110. tlen += 4;
  2111. switch (af) {
  2112. case AF_INET:
  2113. len = sizeof(struct ip) + tlen;
  2114. break;
  2115. #ifdef INET6
  2116. case AF_INET6:
  2117. len = sizeof(struct ip6_hdr) + tlen;
  2118. break;
  2119. #endif /* INET6 */
  2120. default:
  2121. unhandled_af(af);
  2122. }
  2123. /* create outgoing mbuf */
  2124. m = m_gethdr(M_DONTWAIT, MT_HEADER);
  2125. if (m == NULL)
  2126. return;
  2127. if (tag)
  2128. m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
  2129. m->m_pkthdr.pf.tag = rtag;
  2130. m->m_pkthdr.ph_rtableid = rdom;
  2131. if (r && (r->scrub_flags & PFSTATE_SETPRIO))
  2132. m->m_pkthdr.pf.prio = r->set_prio[0];
  2133. if (r && r->qid)
  2134. m->m_pkthdr.pf.qid = r->qid;
  2135. m->m_data += max_linkhdr;
  2136. m->m_pkthdr.len = m->m_len = len;
  2137. m->m_pkthdr.ph_ifidx = 0;
  2138. m->m_pkthdr.csum_flags |= M_TCP_CSUM_OUT;
  2139. bzero(m->m_data, len);
  2140. switch (af) {
  2141. case AF_INET:
  2142. h = mtod(m, struct ip *);
  2143. h->ip_p = IPPROTO_TCP;
  2144. h->ip_len = htons(tlen);
  2145. h->ip_v = 4;
  2146. h->ip_hl = sizeof(*h) >> 2;
  2147. h->ip_tos = IPTOS_LOWDELAY;
  2148. h->ip_len = htons(len);
  2149. h->ip_off = htons(ip_mtudisc ? IP_DF : 0);
  2150. h->ip_ttl = ttl ? ttl : ip_defttl;
  2151. h->ip_sum = 0;
  2152. h->ip_src.s_addr = saddr->v4.s_addr;
  2153. h->ip_dst.s_addr = daddr->v4.s_addr;
  2154. th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
  2155. break;
  2156. #ifdef INET6
  2157. case AF_INET6:
  2158. h6 = mtod(m, struct ip6_hdr *);
  2159. h6->ip6_nxt = IPPROTO_TCP;
  2160. h6->ip6_plen = htons(tlen);
  2161. h6->ip6_vfc |= IPV6_VERSION;
  2162. h6->ip6_hlim = IPV6_DEFHLIM;
  2163. memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
  2164. memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
  2165. th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
  2166. break;
  2167. #endif /* INET6 */
  2168. default:
  2169. unhandled_af(af);
  2170. }
  2171. /* TCP header */
  2172. th->th_sport = sport;
  2173. th->th_dport = dport;
  2174. th->th_seq = htonl(seq);
  2175. th->th_ack = htonl(ack);
  2176. th->th_off = tlen >> 2;
  2177. th->th_flags = flags;
  2178. th->th_win = htons(win);
  2179. if (mss) {
  2180. opt = (char *)(th + 1);
  2181. opt[0] = TCPOPT_MAXSEG;
  2182. opt[1] = 4;
  2183. mss = htons(mss);
  2184. memcpy((opt + 2), &mss, 2);
  2185. }
  2186. switch (af) {
  2187. case AF_INET:
  2188. ip_output(m, NULL, NULL, 0, NULL, NULL, 0);
  2189. break;
  2190. #ifdef INET6
  2191. case AF_INET6:
  2192. ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
  2193. break;
  2194. #endif /* INET6 */
  2195. }
  2196. }
/*
 * Send an ICMP (v4) or ICMP6 error of the given type/code in response to
 * packet m.  Works on a copy of m so the original remains available to
 * the caller.
 */
void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r, u_int rdomain)
{
	struct mbuf	*m0;

	/* operate on a copy; silently give up if none can be made */
	if ((m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT)) == NULL)
		return;

	m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED; /* mark as pf-generated */
	m0->m_pkthdr.ph_rtableid = rdomain;
	/* propagate priority/queue from the matching rule, if requested */
	if (r && (r->scrub_flags & PFSTATE_SETPRIO))
		m0->m_pkthdr.pf.prio = r->set_prio[0];
	if (r && r->qid)
		m0->m_pkthdr.pf.qid = r->qid;

	switch (af) {
	case AF_INET:
		icmp_error(m0, type, code, 0, 0);
		break;
#ifdef INET6
	case AF_INET6:
		icmp6_error(m0, type, code, 0);
		break;
#endif /* INET6 */
	}
}
  2221. /*
  2222. * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
  2223. * If n is 0, they match if they are equal. If n is != 0, they match if they
  2224. * are different.
  2225. */
  2226. int
  2227. pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
  2228. struct pf_addr *b, sa_family_t af)
  2229. {
  2230. int match = 0;
  2231. switch (af) {
  2232. case AF_INET:
  2233. if ((a->addr32[0] & m->addr32[0]) ==
  2234. (b->addr32[0] & m->addr32[0]))
  2235. match++;
  2236. break;
  2237. #ifdef INET6
  2238. case AF_INET6:
  2239. if (((a->addr32[0] & m->addr32[0]) ==
  2240. (b->addr32[0] & m->addr32[0])) &&
  2241. ((a->addr32[1] & m->addr32[1]) ==
  2242. (b->addr32[1] & m->addr32[1])) &&
  2243. ((a->addr32[2] & m->addr32[2]) ==
  2244. (b->addr32[2] & m->addr32[2])) &&
  2245. ((a->addr32[3] & m->addr32[3]) ==
  2246. (b->addr32[3] & m->addr32[3])))
  2247. match++;
  2248. break;
  2249. #endif /* INET6 */
  2250. }
  2251. if (match) {
  2252. if (n)
  2253. return (0);
  2254. else
  2255. return (1);
  2256. } else {
  2257. if (n)
  2258. return (1);
  2259. else
  2260. return (0);
  2261. }
  2262. }
  2263. /*
  2264. * Return 1 if b <= a <= e, otherwise return 0.
  2265. */
  2266. int
  2267. pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
  2268. struct pf_addr *a, sa_family_t af)
  2269. {
  2270. switch (af) {
  2271. case AF_INET:
  2272. if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
  2273. (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
  2274. return (0);
  2275. break;
  2276. #ifdef INET6
  2277. case AF_INET6: {
  2278. int i;
  2279. /* check a >= b */
  2280. for (i = 0; i < 4; ++i)
  2281. if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
  2282. break;
  2283. else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
  2284. return (0);
  2285. /* check a <= e */
  2286. for (i = 0; i < 4; ++i)
  2287. if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
  2288. break;
  2289. else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
  2290. return (0);
  2291. break;
  2292. }
  2293. #endif /* INET6 */
  2294. }
  2295. return (1);
  2296. }
  2297. int
  2298. pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
  2299. {
  2300. switch (op) {
  2301. case PF_OP_IRG:
  2302. return ((p > a1) && (p < a2));
  2303. case PF_OP_XRG:
  2304. return ((p < a1) || (p > a2));
  2305. case PF_OP_RRG:
  2306. return ((p >= a1) && (p <= a2));
  2307. case PF_OP_EQ:
  2308. return (p == a1);
  2309. case PF_OP_NE:
  2310. return (p != a1);
  2311. case PF_OP_LT:
  2312. return (p < a1);
  2313. case PF_OP_LE:
  2314. return (p <= a1);
  2315. case PF_OP_GT:
  2316. return (p > a1);
  2317. case PF_OP_GE:
  2318. return (p >= a1);
  2319. }
  2320. return (0); /* never reached */
  2321. }
  2322. int
  2323. pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
  2324. {
  2325. return (pf_match(op, ntohs(a1), ntohs(a2), ntohs(p)));
  2326. }
  2327. int
  2328. pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
  2329. {
  2330. if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
  2331. return (0);
  2332. return (pf_match(op, a1, a2, u));
  2333. }
  2334. int
  2335. pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
  2336. {
  2337. if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
  2338. return (0);
  2339. return (pf_match(op, a1, a2, g));
  2340. }
  2341. int
  2342. pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
  2343. {
  2344. if (*tag == -1)
  2345. *tag = m->m_pkthdr.pf.tag;
  2346. return ((!r->match_tag_not && r->match_tag == *tag) ||
  2347. (r->match_tag_not && r->match_tag != *tag));
  2348. }
  2349. int
  2350. pf_match_rcvif(struct mbuf *m, struct pf_rule *r)
  2351. {
  2352. struct ifnet *ifp;
  2353. struct pfi_kif *kif;
  2354. ifp = if_get(m->m_pkthdr.ph_ifidx);
  2355. if (ifp == NULL)
  2356. return (0);
  2357. if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
  2358. kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
  2359. else
  2360. kif = (struct pfi_kif *)ifp->if_pf_kif;
  2361. if (kif == NULL) {
  2362. DPFPRINTF(LOG_ERR,
  2363. "pf_test_via: kif == NULL, @%d via %s",
  2364. r->nr, r->rcv_ifname);
  2365. return (0);
  2366. }
  2367. return (pfi_kif_match(r->rcv_kif, kif));
  2368. }
  2369. void
  2370. pf_tag_packet(struct mbuf *m, int tag, int rtableid)
  2371. {
  2372. if (tag > 0)
  2373. m->m_pkthdr.pf.tag = tag;
  2374. if (rtableid >= 0)
  2375. m->m_pkthdr.ph_rtableid = (u_int)rtableid;
  2376. }
/*
 * Descend into the anchor referenced by rule *r: push the current
 * ruleset/rule onto pf_anchor_stack and point *r at the first rule of the
 * anchor's active ruleset (for wildcard anchors, at the first child's
 * ruleset).  On stack overflow the anchor is skipped entirely.
 */
void
pf_step_into_anchor(int *depth, struct pf_ruleset **rs,
    struct pf_rule **r, struct pf_rule **a)
{
	struct pf_anchor_stackframe	*f;

	/* refuse to recurse deeper than the fixed-size anchor stack */
	if (*depth >= sizeof(pf_anchor_stack) /
	    sizeof(pf_anchor_stack[0])) {
		log(LOG_ERR, "pf_step_into_anchor: stack overflow\n");
		*r = TAILQ_NEXT(*r, entries);
		return;
	} else if (a != NULL)
		*a = *r;	/* remember the enclosing anchor rule */
	f = pf_anchor_stack + (*depth)++;
	f->rs = *rs;
	f->r = *r;
	if ((*r)->anchor_wildcard) {
		/* wildcard anchor: iterate its children, start at RB min */
		f->parent = &(*r)->anchor->children;
		if ((f->child = RB_MIN(pf_anchor_node, f->parent)) == NULL) {
			*r = NULL;	/* empty: step back out */
			return;
		}
		*rs = &f->child->ruleset;
	} else {
		f->parent = NULL;
		f->child = NULL;
		*rs = &(*r)->anchor->ruleset;
	}
	*r = TAILQ_FIRST((*rs)->rules.active.ptr);
}
/*
 * Pop anchor stack frames until a rule to continue evaluation with is
 * found.  For wildcard anchors, first advance to the next child ruleset
 * before popping.  Updates *match, *a and *rs on the way out; returns 1
 * when a popped anchor rule was marked quick (evaluation should stop),
 * 0 otherwise.
 */
int
pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe	*f;
	int				 quick = 0;

	do {
		if (*depth <= 0)
			break;
		f = pf_anchor_stack + *depth - 1;
		if (f->parent != NULL && f->child != NULL) {
			/* wildcard anchor: try the next child first */
			f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
			if (f->child != NULL) {
				*rs = &f->child->ruleset;
				*r = TAILQ_FIRST((*rs)->rules.active.ptr);
				if (*r == NULL)
					continue;	/* empty child: next */
				else
					break;
			}
		}
		(*depth)--;
		/* back at top level: no enclosing anchor rule any more */
		if (*depth == 0 && a != NULL)
			*a = NULL;
		else if (a != NULL)
			*a = f->r;
		*rs = f->rs;
		if (*match > *depth) {
			*match = *depth;
			if (f->r->quick)
				quick = 1;
		}
		*r = TAILQ_NEXT(f->r, entries);
	} while (*r == NULL);

	return (quick);
}
  2442. void
  2443. pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
  2444. struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
  2445. {
  2446. switch (af) {
  2447. case AF_INET:
  2448. naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
  2449. ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
  2450. break;
  2451. #ifdef INET6
  2452. case AF_INET6:
  2453. naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
  2454. ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
  2455. naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
  2456. ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
  2457. naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
  2458. ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
  2459. naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
  2460. ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
  2461. break;
  2462. #endif /* INET6 */
  2463. default:
  2464. unhandled_af(af);
  2465. }
  2466. }
  2467. void
  2468. pf_addr_inc(struct pf_addr *addr, sa_family_t af)
  2469. {
  2470. switch (af) {
  2471. case AF_INET:
  2472. addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
  2473. break;
  2474. #ifdef INET6
  2475. case AF_INET6:
  2476. if (addr->addr32[3] == 0xffffffff) {
  2477. addr->addr32[3] = 0;
  2478. if (addr->addr32[2] == 0xffffffff) {
  2479. addr->addr32[2] = 0;
  2480. if (addr->addr32[1] == 0xffffffff) {
  2481. addr->addr32[1] = 0;
  2482. addr->addr32[0] =
  2483. htonl(ntohl(addr->addr32[0]) + 1);
  2484. } else
  2485. addr->addr32[1] =
  2486. htonl(ntohl(addr->addr32[1]) + 1);
  2487. } else
  2488. addr->addr32[2] =
  2489. htonl(ntohl(addr->addr32[2]) + 1);
  2490. } else
  2491. addr->addr32[3] =
  2492. htonl(ntohl(addr->addr32[3]) + 1);
  2493. break;
  2494. #endif /* INET6 */
  2495. default:
  2496. unhandled_af(af);
  2497. }
  2498. }
/*
 * Find the local TCP/UDP socket this packet belongs to and record its
 * effective uid/gid and pid in pd->lookup.  Returns 1 when a socket was
 * found, -1 otherwise (including non-TCP/UDP packets).
 */
int
pf_socket_lookup(struct pf_pdesc *pd)
{
	struct pf_addr		*saddr, *daddr;
	u_int16_t		 sport, dport;
	struct inpcbtable	*tb;
	struct inpcb		*inp;

	if (pd == NULL)
		return (-1);
	/* defaults in case no matching socket is found */
	pd->lookup.uid = UID_MAX;
	pd->lookup.gid = GID_MAX;
	pd->lookup.pid = NO_PID;

	switch (pd->proto) {
	case IPPROTO_TCP:
		if (pd->hdr.tcp == NULL)
			return (-1);
		sport = pd->hdr.tcp->th_sport;
		dport = pd->hdr.tcp->th_dport;
		tb = &tcbtable;
		break;
	case IPPROTO_UDP:
		if (pd->hdr.udp == NULL)
			return (-1);
		sport = pd->hdr.udp->uh_sport;
		dport = pd->hdr.udp->uh_dport;
		tb = &udbtable;
		break;
	default:
		return (-1);
	}

	/*
	 * For outbound packets the local endpoint is the source, so swap
	 * the tuple before the PCB lookup (which expects foreign first).
	 */
	if (pd->dir == PF_IN) {
		saddr = pd->src;
		daddr = pd->dst;
	} else {
		u_int16_t	p;

		p = sport;
		sport = dport;
		dport = p;
		saddr = pd->dst;
		daddr = pd->src;
	}

	switch (pd->af) {
	case AF_INET:
		/*
		 * Fails when rtable is changed while evaluating the ruleset
		 * The socket looked up will not match the one hit in the end.
		 */
		inp = in_pcbhashlookup(tb, saddr->v4, sport, daddr->v4, dport,
		    pd->rdomain);
		if (inp == NULL) {
			/* no established PCB: try a listening socket */
			inp = in_pcblookup_listen(tb, daddr->v4, dport, 0,
			    NULL, pd->rdomain);
			if (inp == NULL)
				return (-1);
		}
		break;
#ifdef INET6
	case AF_INET6:
		inp = in6_pcbhashlookup(tb, &saddr->v6, sport, &daddr->v6,
		    dport, pd->rdomain);
		if (inp == NULL) {
			inp = in6_pcblookup_listen(tb, &daddr->v6, dport, 0,
			    NULL, pd->rdomain);
			if (inp == NULL)
				return (-1);
		}
		break;
#endif /* INET6 */
	default:
		unhandled_af(pd->af);
	}

	/* report the owning socket's credentials */
	pd->lookup.uid = inp->inp_socket->so_euid;
	pd->lookup.gid = inp->inp_socket->so_egid;
	pd->lookup.pid = inp->inp_socket->so_cpid;
	return (1);
}
/*
 * Extract the TCP window-scale option from the header described by pd.
 * Returns 0 when the header has no options or cannot be read; otherwise
 * the shift count (capped at TCP_MAX_WINSHIFT) with PF_WSCALE_FLAG or'ed
 * in to indicate the option was present.
 */
u_int8_t
pf_get_wscale(struct pf_pdesc *pd)
{
	struct tcphdr	*th = pd->hdr.tcp;
	int		 hlen;
	u_int8_t	 hdr[60];	/* maximal TCP header with options */
	u_int8_t	*opt, optlen;
	u_int8_t	 wscale = 0;

	hlen = th->th_off << 2;		/* hlen <= sizeof(hdr) */
	if (hlen <= sizeof(struct tcphdr))
		return (0);
	if (!pf_pull_hdr(pd->m, pd->off, hdr, hlen, NULL, NULL, pd->af))
		return (0);
	opt = hdr + sizeof(struct tcphdr);
	hlen -= sizeof(struct tcphdr);
	while (hlen >= 3) {
		switch (*opt) {
		case TCPOPT_EOL:	/* skipped like NOP: one byte */
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_WINDOW:
			wscale = opt[2];
			if (wscale > TCP_MAX_WINSHIFT)
				wscale = TCP_MAX_WINSHIFT;
			wscale |= PF_WSCALE_FLAG;
			/* FALLTHROUGH */
		default:
			optlen = opt[1];
			if (optlen < 2)	/* guard against bogus length */
				optlen = 2;
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	return (wscale);
}
/*
 * Extract the MSS option from the TCP header described by pd.  Returns 0
 * when the header has no options or cannot be read, tcp_mssdflt when no
 * MSS option is present, otherwise the advertised MSS in host order.
 */
u_int16_t
pf_get_mss(struct pf_pdesc *pd)
{
	struct tcphdr	*th = pd->hdr.tcp;
	int		 hlen;
	u_int8_t	 hdr[60];	/* maximal TCP header with options */
	u_int8_t	*opt, optlen;
	u_int16_t	 mss = tcp_mssdflt;

	hlen = th->th_off << 2;		/* hlen <= sizeof(hdr) */
	if (hlen <= sizeof(struct tcphdr))
		return (0);
	if (!pf_pull_hdr(pd->m, pd->off, hdr, hlen, NULL, NULL, pd->af))
		return (0);
	opt = hdr + sizeof(struct tcphdr);
	hlen -= sizeof(struct tcphdr);
	while (hlen >= TCPOLEN_MAXSEG) {
		switch (*opt) {
		case TCPOPT_EOL:	/* skipped like NOP: one byte */
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_MAXSEG:
			memcpy(&mss, (opt + 2), 2);
			mss = ntohs(mss);
			/* FALLTHROUGH */
		default:
			optlen = opt[1];
			if (optlen < 2)	/* guard against bogus length */
				optlen = 2;
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	return (mss);
}
/*
 * Compute the MSS to use towards addr: if a route exists, start from the
 * route's interface MTU minus IP and TCP header sizes (but never below
 * tcp_mssdflt), then clamp to the peer's offer and to a floor of 64.
 */
u_int16_t
pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
{
	struct sockaddr_in	*dst;
#ifdef INET6
	struct sockaddr_in6	*dst6;
#endif /* INET6 */
	struct rtentry		*rt = NULL;
	struct sockaddr_storage	 ss;
	int			 hlen;
	u_int16_t		 mss = tcp_mssdflt;

	memset(&ss, 0, sizeof(ss));

	switch (af) {
	case AF_INET:
		hlen = sizeof(struct ip);
		dst = (struct sockaddr_in *)&ss;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4;
		rt = rtalloc(sintosa(dst), RT_REPORT, rtableid);
		break;
#ifdef INET6
	case AF_INET6:
		hlen = sizeof(struct ip6_hdr);
		dst6 = (struct sockaddr_in6 *)&ss;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6;
		rt = rtalloc(sin6tosa(dst6), RT_REPORT, rtableid);
		break;
#endif /* INET6 */
	}

	if (rt && rt->rt_ifp) {
		/* derive MSS from the path's interface MTU */
		mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
		mss = max(tcp_mssdflt, mss);
		rtfree(rt);
	}
	mss = min(mss, offer);
	mss = max(mss, 64);		/* sanity - at least max opt space */
	return (mss);
}
/*
 * For a rule using route-to/reply-to (r->rt), select the routing address
 * and outgoing kif for state s via pf_map_addr().  Returns 0 on success
 * (also recording r as the state's nat rule), non-zero on mapping failure
 * or an unsupported address family; 0 as well when the rule has no route
 * option at all.
 */
static __inline int
pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr, sa_family_t af)
{
	struct pf_rule		*r = s->rule.ptr;
	struct pf_src_node	*sns[PF_SN_MAX];
	int			 rv;

	s->rt_kif = NULL;
	if (!r->rt)
		return (0);	/* no route-to on this rule */

	bzero(sns, sizeof(sns));
	switch (af) {
	case AF_INET:
		rv = pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, sns,
		    &r->route, PF_SN_ROUTE);
		break;
#ifdef INET6
	case AF_INET6:
		rv = pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, sns,
		    &r->route, PF_SN_ROUTE);
		break;
#endif /* INET6 */
	default:
		rv = 1;		/* unsupported address family */
	}

	if (rv == 0) {
		s->rt_kif = r->route.kif;
		s->natrule.ptr = r;
	}

	return (rv);
}
/*
 * Generate an initial TCP sequence number (e.g. for synproxy) by hashing
 * a lazily-initialized random secret together with the connection's
 * rdomain, ports and addresses using SHA-512, offset by tcp_iss and a
 * monotonically advancing per-call offset.
 */
u_int32_t
pf_tcp_iss(struct pf_pdesc *pd)
{
	SHA2_CTX ctx;
	union {
		uint8_t bytes[SHA512_DIGEST_LENGTH];
		uint32_t words[1];	/* first 32-bit word of the digest */
	} digest;

	/* initialize the secret and its pre-hashed context on first use */
	if (pf_tcp_secret_init == 0) {
		arc4random_buf(pf_tcp_secret, sizeof(pf_tcp_secret));
		SHA512Init(&pf_tcp_secret_ctx);
		SHA512Update(&pf_tcp_secret_ctx, pf_tcp_secret,
		    sizeof(pf_tcp_secret));
		pf_tcp_secret_init = 1;
	}
	/* copy the pre-seeded context so the secret is hashed only once */
	ctx = pf_tcp_secret_ctx;

	SHA512Update(&ctx, &pd->rdomain, sizeof(pd->rdomain));
	SHA512Update(&ctx, &pd->hdr.tcp->th_sport, sizeof(u_short));
	SHA512Update(&ctx, &pd->hdr.tcp->th_dport, sizeof(u_short));
	switch (pd->af) {
	case AF_INET:
		SHA512Update(&ctx, &pd->src->v4, sizeof(struct in_addr));
		SHA512Update(&ctx, &pd->dst->v4, sizeof(struct in_addr));
		break;
#ifdef INET6
	case AF_INET6:
		SHA512Update(&ctx, &pd->src->v6, sizeof(struct in6_addr));
		SHA512Update(&ctx, &pd->dst->v6, sizeof(struct in6_addr));
		break;
#endif /* INET6 */
	}
	SHA512Final(digest.bytes, &ctx);
	/* NOTE(review): not atomic; presumably serialized by the pf lock */
	pf_tcp_iss_off += 4096;
	return (digest.words[0] + tcp_iss + pf_tcp_iss_off);
}
  2757. void
  2758. pf_rule_to_actions(struct pf_rule *r, struct pf_rule_actions *a)
  2759. {
  2760. if (r->qid)
  2761. a->qid = r->qid;
  2762. if (r->pqid)
  2763. a->pqid = r->pqid;
  2764. if (r->rtableid >= 0)
  2765. a->rtableid = r->rtableid;
  2766. #if NPFLOG > 0
  2767. a->log |= r->log;
  2768. #endif /* NPFLOG > 0 */
  2769. if (r->scrub_flags & PFSTATE_SETTOS)
  2770. a->set_tos = r->set_tos;
  2771. if (r->min_ttl)
  2772. a->min_ttl = r->min_ttl;
  2773. if (r->max_mss)
  2774. a->max_mss = r->max_mss;
  2775. a->flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID|
  2776. PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO));
  2777. if (r->scrub_flags & PFSTATE_SETPRIO) {
  2778. a->set_prio[0] = r->set_prio[0];
  2779. a->set_prio[1] = r->set_prio[1];
  2780. }
  2781. }
/*
 * Evaluate one rule-match criterion inside pf_test_rule(): if test t is
 * true the current rule r does NOT match, so set r to the alternative a
 * (a skip-step pointer or TAILQ_NEXT) and jump to the "nextrule" label.
 * Only usable inside pf_test_rule(), which provides both r and the label.
 */
#define PF_TEST_ATTRIB(t, a) \
	do { \
		if (t) { \
			r = a; \
			goto nextrule; \
		} \
	} while (0)
/*
 * Evaluate the active ruleset against the packet described by pd.
 * Returns PF_PASS/PF_DROP (or PF_DEFER/PF_AFRT for pfsync deferral and
 * af-translated packets).  On a match, *rm/*am/*rsm are set to the
 * matching rule, its enclosing anchor rule and that anchor's ruleset;
 * *sm receives the created state, if any.
 */
int
pf_test_rule(struct pf_pdesc *pd, struct pf_rule **rm, struct pf_state **sm,
    struct pf_rule **am, struct pf_ruleset **rsm)
{
	struct pf_rule		*r;
	struct pf_rule		*nr = NULL;	/* nat rule, if any */
	struct pf_rule		*a = NULL;
	struct pf_ruleset	*arsm = NULL;
	struct pf_ruleset	*aruleset = NULL;
	struct pf_ruleset	*ruleset = NULL;
	struct pf_rule_slist	 rules;		/* matched "match" rules */
	struct pf_rule_item	*ri;
	struct pf_src_node	*sns[PF_SN_MAX];
	struct tcphdr		*th = pd->hdr.tcp;
	struct pf_state_key	*skw = NULL, *sks = NULL;
	struct pf_rule_actions	 act;		/* accumulated actions */
	u_short			 reason;
	int			 rewrite = 0;
	int			 tag = -1;
	int			 asd = 0;	/* anchor stack depth */
	int			 match = 0;
	int			 state_icmp = 0, icmp_dir = 0;
	u_int16_t		 virtual_type, virtual_id;
	u_int8_t		 icmptype = 0, icmpcode = 0;
	int			 action = PF_DROP;

	bzero(&act, sizeof(act));
	bzero(sns, sizeof(sns));
	act.rtableid = pd->rdomain;
	SLIST_INIT(&rules);

	/* drop inbound packets early when the interface queue is full */
	if (pd->dir == PF_IN && if_congested()) {
		REASON_SET(&reason, PFRES_CONGEST);
		return (PF_DROP);
	}

	/*
	 * For ICMP/ICMP6, derive virtual port-like identifiers so the
	 * generic port matching below can be reused.
	 */
	switch (pd->virtual_proto) {
	case IPPROTO_ICMP:
		icmptype = pd->hdr.icmp->icmp_type;
		icmpcode = pd->hdr.icmp->icmp_code;
		state_icmp = pf_icmp_mapping(pd, icmptype,
		    &icmp_dir, &virtual_id, &virtual_type);
		if (icmp_dir == PF_IN) {
			pd->osport = pd->nsport = virtual_id;
			pd->odport = pd->ndport = virtual_type;
		} else {
			pd->osport = pd->nsport = virtual_type;
			pd->odport = pd->ndport = virtual_id;
		}
		break;
#ifdef INET6
	case IPPROTO_ICMPV6:
		icmptype = pd->hdr.icmp6->icmp6_type;
		icmpcode = pd->hdr.icmp6->icmp6_code;
		state_icmp = pf_icmp_mapping(pd, icmptype,
		    &icmp_dir, &virtual_id, &virtual_type);
		if (icmp_dir == PF_IN) {
			pd->osport = pd->nsport = virtual_id;
			pd->odport = pd->ndport = virtual_type;
		} else {
			pd->osport = pd->nsport = virtual_type;
			pd->odport = pd->ndport = virtual_id;
		}
		break;
#endif /* INET6 */
	}

	ruleset = &pf_main_ruleset;
	r = TAILQ_FIRST(pf_main_ruleset.rules.active.ptr);
	while (r != NULL) {
		r->evaluations++;
		/* each PF_TEST_ATTRIB jumps to nextrule on mismatch */
		PF_TEST_ATTRIB((pfi_kif_match(r->kif, pd->kif) == r->ifnot),
			r->skip[PF_SKIP_IFP].ptr);
		PF_TEST_ATTRIB((r->direction && r->direction != pd->dir),
			r->skip[PF_SKIP_DIR].ptr);
		PF_TEST_ATTRIB((r->onrdomain >= 0 &&
		    (r->onrdomain == pd->rdomain) == r->ifnot),
			r->skip[PF_SKIP_RDOM].ptr);
		PF_TEST_ATTRIB((r->af && r->af != pd->af),
			r->skip[PF_SKIP_AF].ptr);
		PF_TEST_ATTRIB((r->proto && r->proto != pd->proto),
			r->skip[PF_SKIP_PROTO].ptr);
		PF_TEST_ATTRIB((PF_MISMATCHAW(&r->src.addr, &pd->nsaddr,
		    pd->naf, r->src.neg, pd->kif, act.rtableid)),
			r->skip[PF_SKIP_SRC_ADDR].ptr);
		PF_TEST_ATTRIB((PF_MISMATCHAW(&r->dst.addr, &pd->ndaddr, pd->af,
		    r->dst.neg, NULL, act.rtableid)),
			r->skip[PF_SKIP_DST_ADDR].ptr);

		switch (pd->virtual_proto) {
		case PF_VPROTO_FRAGMENT:
			/* tcp/udp only. port_op always 0 in other cases */
			PF_TEST_ATTRIB((r->src.port_op || r->dst.port_op),
				TAILQ_NEXT(r, entries));
			PF_TEST_ATTRIB((pd->proto == IPPROTO_TCP && r->flagset),
				TAILQ_NEXT(r, entries));
			/* icmp only. type/code always 0 in other cases */
			PF_TEST_ATTRIB((r->type || r->code),
				TAILQ_NEXT(r, entries));
			/* tcp/udp only. {uid|gid}.op always 0 in other cases */
			PF_TEST_ATTRIB((r->gid.op || r->uid.op),
				TAILQ_NEXT(r, entries));
			break;

		case IPPROTO_TCP:
			PF_TEST_ATTRIB(((r->flagset & th->th_flags) !=
			    r->flags),
				TAILQ_NEXT(r, entries));
			PF_TEST_ATTRIB((r->os_fingerprint != PF_OSFP_ANY &&
			    !pf_osfp_match(pf_osfp_fingerprint(pd),
			    r->os_fingerprint)),
				TAILQ_NEXT(r, entries));
			/* FALLTHROUGH */

		case IPPROTO_UDP:
			/* tcp/udp only. port_op always 0 in other cases */
			PF_TEST_ATTRIB((r->src.port_op &&
			    !pf_match_port(r->src.port_op, r->src.port[0],
			    r->src.port[1], pd->nsport)),
				r->skip[PF_SKIP_SRC_PORT].ptr);
			PF_TEST_ATTRIB((r->dst.port_op &&
			    !pf_match_port(r->dst.port_op, r->dst.port[0],
			    r->dst.port[1], pd->ndport)),
				r->skip[PF_SKIP_DST_PORT].ptr);
			/* tcp/udp only. uid.op always 0 in other cases */
			PF_TEST_ATTRIB((r->uid.op && (pd->lookup.done ||
			    (pd->lookup.done =
			    pf_socket_lookup(pd), 1)) &&
			    !pf_match_uid(r->uid.op, r->uid.uid[0],
			    r->uid.uid[1], pd->lookup.uid)),
				TAILQ_NEXT(r, entries));
			/* tcp/udp only. gid.op always 0 in other cases */
			PF_TEST_ATTRIB((r->gid.op && (pd->lookup.done ||
			    (pd->lookup.done =
			    pf_socket_lookup(pd), 1)) &&
			    !pf_match_gid(r->gid.op, r->gid.gid[0],
			    r->gid.gid[1], pd->lookup.gid)),
				TAILQ_NEXT(r, entries));
			break;

		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			/* icmp only. type always 0 in other cases */
			PF_TEST_ATTRIB((r->type && r->type != icmptype + 1),
				TAILQ_NEXT(r, entries));
			/* icmp only. type always 0 in other cases */
			PF_TEST_ATTRIB((r->code && r->code != icmpcode + 1),
				TAILQ_NEXT(r, entries));
			/* icmp only. don't create states on replies */
			PF_TEST_ATTRIB((r->keep_state && !state_icmp &&
			    (r->rule_flag & PFRULE_STATESLOPPY) == 0 &&
			    icmp_dir != PF_IN),
				TAILQ_NEXT(r, entries));
			break;

		default:
			break;
		}

		PF_TEST_ATTRIB((r->rule_flag & PFRULE_FRAGMENT &&
		    pd->virtual_proto != PF_VPROTO_FRAGMENT),
			TAILQ_NEXT(r, entries));
		PF_TEST_ATTRIB((r->tos && !(r->tos == pd->tos)),
			TAILQ_NEXT(r, entries));
		PF_TEST_ATTRIB((r->prob &&
		    r->prob <= arc4random_uniform(UINT_MAX - 1) + 1),
			TAILQ_NEXT(r, entries));
		PF_TEST_ATTRIB((r->match_tag && !pf_match_tag(pd->m, r, &tag)),
			TAILQ_NEXT(r, entries));
		PF_TEST_ATTRIB((r->rcv_kif && pf_match_rcvif(pd->m, r) ==
		    r->rcvifnot),
			TAILQ_NEXT(r, entries));
		PF_TEST_ATTRIB((r->prio &&
		    (r->prio == PF_PRIO_ZERO ? 0 : r->prio) != pd->m->m_pkthdr.pf.prio),
			TAILQ_NEXT(r, entries));

		/* all criteria matched: apply the rule */
		if (r->tag)
			tag = r->tag;
		if (r->anchor == NULL) {
			if (r->action == PF_MATCH) {
				/* "match" rules accumulate, evaluation goes on */
				if ((ri = pool_get(&pf_rule_item_pl,
				    PR_NOWAIT)) == NULL) {
					REASON_SET(&reason, PFRES_MEMORY);
					goto cleanup;
				}
				ri->r = r;
				/* order is irrelevant */
				SLIST_INSERT_HEAD(&rules, ri, entry);
				pf_rule_to_actions(r, &act);
				if (r->rule_flag & PFRULE_AFTO)
					pd->naf = r->naf;
				if (pf_get_transaddr(r, pd, sns, &nr) == -1) {
					REASON_SET(&reason, PFRES_TRANSLATE);
					goto cleanup;
				}
#if NPFLOG > 0
				if (r->log) {
					REASON_SET(&reason, PFRES_MATCH);
					PFLOG_PACKET(pd, reason, r, a, ruleset,
					    NULL);
				}
#endif /* NPFLOG > 0 */
			} else {
				/* pass/block: remember as last matching rule */
				match = asd;
				*rm = r;
				*am = a;
				*rsm = ruleset;
				arsm = aruleset;
			}

#if NPFLOG > 0
			if (act.log & PF_LOG_MATCHES)
				pf_log_matches(pd, r, a, ruleset, &rules);
#endif /* NPFLOG > 0 */

			if (r->quick)
				break;	/* quick: stop evaluation now */
			r = TAILQ_NEXT(r, entries);
		} else {
			/* rule references an anchor: descend into it */
			aruleset = ruleset;
			pf_step_into_anchor(&asd, &ruleset, &r, &a);
		}

 nextrule:
		/* exhausted this (sub-)ruleset: climb out of anchors */
		if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
		    &r, &a, &match))
			break;
	}
	r = *rm;	/* matching rule */
	a = *am;	/* rule that defines an anchor containing 'r' */
	ruleset = *rsm;	/* ruleset of the anchor defined by the rule 'a' */
	aruleset = arsm;/* ruleset of the 'a' rule itself */

	/* apply actions for last matching pass/block rule */
	pf_rule_to_actions(r, &act);
	if (r->rule_flag & PFRULE_AFTO)
		pd->naf = r->naf;
	if (pf_get_transaddr(r, pd, sns, &nr) == -1) {
		REASON_SET(&reason, PFRES_TRANSLATE);
		goto cleanup;
	}
	REASON_SET(&reason, PFRES_MATCH);

#if NPFLOG > 0
	if (r->log)
		PFLOG_PACKET(pd, reason, r, a, ruleset, NULL);
	if (act.log & PF_LOG_MATCHES)
		pf_log_matches(pd, r, a, ruleset, &rules);
#endif /* NPFLOG > 0 */

	/* for blocking rules, optionally send TCP RST / ICMP unreachable */
	if (pd->virtual_proto != PF_VPROTO_FRAGMENT &&
	    (r->action == PF_DROP) &&
	    ((r->rule_flag & PFRULE_RETURNRST) ||
	    (r->rule_flag & PFRULE_RETURNICMP) ||
	    (r->rule_flag & PFRULE_RETURN))) {
		if (pd->proto == IPPROTO_TCP &&
		    ((r->rule_flag & PFRULE_RETURNRST) ||
		    (r->rule_flag & PFRULE_RETURN)) &&
		    !(th->th_flags & TH_RST)) {
			u_int32_t	 ack = ntohl(th->th_seq) + pd->p_len;

			/* don't reply to packets with a bad checksum */
			if (pf_check_proto_cksum(pd, pd->off,
			    pd->tot_len - pd->off, IPPROTO_TCP, pd->af))
				REASON_SET(&reason, PFRES_PROTCKSUM);
			else {
				/* SYN and FIN each consume a sequence number */
				if (th->th_flags & TH_SYN)
					ack++;
				if (th->th_flags & TH_FIN)
					ack++;
				pf_send_tcp(r, pd->af, pd->dst,
				    pd->src, th->th_dport, th->th_sport,
				    ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
				    r->return_ttl, 1, 0, pd->rdomain);
			}
		} else if ((pd->proto != IPPROTO_ICMP ||
		    ICMP_INFOTYPE(icmptype)) && pd->af == AF_INET &&
		    r->return_icmp)
			pf_send_icmp(pd->m, r->return_icmp >> 8,
			    r->return_icmp & 255, pd->af, r, pd->rdomain);
		else if ((pd->proto != IPPROTO_ICMPV6 ||
		    (icmptype >= ICMP6_ECHO_REQUEST &&
		    icmptype != ND_REDIRECT)) && pd->af == AF_INET6 &&
		    r->return_icmp6)
			pf_send_icmp(pd->m, r->return_icmp6 >> 8,
			    r->return_icmp6 & 255, pd->af, r, pd->rdomain);
	}

	if (r->action == PF_DROP)
		goto cleanup;

	pf_tag_packet(pd->m, tag, act.rtableid);
	if (act.rtableid >= 0 &&
	    rtable_l2(act.rtableid) != pd->rdomain)
		pd->destchg = 1;

	/* passed packets with IP options are dropped unless allowed */
	if (r->action == PF_PASS && pd->badopts && ! r->allow_opts) {
		REASON_SET(&reason, PFRES_IPOPTIONS);
#if NPFLOG > 0
		pd->pflog |= PF_LOG_FORCE;
#endif /* NPFLOG > 0 */
		DPFPRINTF(LOG_NOTICE, "dropping packet with "
		    "ip/ipv6 options in pf_test_rule()");
		goto cleanup;
	}

	if (pd->virtual_proto != PF_VPROTO_FRAGMENT
	    && !state_icmp && r->keep_state) {

		/* enforce source tracking and state limits before creating */
		if (r->rule_flag & PFRULE_SRCTRACK &&
		    pf_insert_src_node(&sns[PF_SN_NONE], r, PF_SN_NONE, pd->af,
		    pd->src, NULL, 0) != 0) {
			REASON_SET(&reason, PFRES_SRCLIMIT);
			goto cleanup;
		}

		if (r->max_states && (r->states_cur >= r->max_states)) {
			pf_status.lcounters[LCNT_STATES]++;
			REASON_SET(&reason, PFRES_MAXSTATES);
			goto cleanup;
		}

		action = pf_create_state(pd, r, a, nr, &skw, &sks, &rewrite,
		    sm, tag, &rules, &act, sns);

		if (action != PF_PASS)
			goto cleanup;
		if (sks != skw) {
			/* NAT in effect: rewrite the header accordingly */
			struct pf_state_key	*sk;

			if (pd->dir == PF_IN)
				sk = sks;
			else
				sk = skw;
			rewrite += pf_translate(pd,
			    &sk->addr[pd->af == pd->naf ? pd->sidx : pd->didx],
			    sk->port[pd->af == pd->naf ? pd->sidx : pd->didx],
			    &sk->addr[pd->af == pd->naf ? pd->didx : pd->sidx],
			    sk->port[pd->af == pd->naf ? pd->didx : pd->sidx],
			    virtual_type, icmp_dir);
		}
	} else {
		/* stateless pass: the collected match rules aren't needed */
		while ((ri = SLIST_FIRST(&rules))) {
			SLIST_REMOVE_HEAD(&rules, entry);
			pool_put(&pf_rule_item_pl, ri);
		}
	}

	/* copy back packet headers if needed */
	if (rewrite && pd->hdrlen) {
		pf_cksum(pd, pd->m);
		m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any, M_NOWAIT);
	}

#if NPFSYNC > 0
	if (*sm != NULL && !ISSET((*sm)->state_flags, PFSTATE_NOSYNC) &&
	    pd->dir == PF_OUT && pfsync_up()) {
		/*
		 * We want the state created, but we don't
		 * want to send this in case a partner
		 * firewall has to know about it to allow
		 * replies through it.
		 */
		if (pfsync_defer(*sm, pd->m))
			return (PF_DEFER);
	}
#endif /* NPFSYNC > 0 */

	/* one-shot rules are removed after their first match */
	if (r->rule_flag & PFRULE_ONCE)
		pf_purge_rule(ruleset, r, aruleset, a);

#ifdef INET6
	/* address-family translation needs a second pass */
	if (rewrite && skw->af != sks->af)
		return (PF_AFRT);
#endif /* INET6 */

	return (PF_PASS);

cleanup:
	while ((ri = SLIST_FIRST(&rules))) {
		SLIST_REMOVE_HEAD(&rules, entry);
		pool_put(&pf_rule_item_pl, ri);
	}

	return (action);
}
/*
 * Allocate and initialize a new pf state for the packet described by pd,
 * bound to rule r (anchor a, NAT rule nr).  On success the state is
 * inserted into the state table, *sm points at it, and the match-rule
 * list in *rules is handed over to the state.  Returns PF_PASS,
 * PF_SYNPROXY_DROP (synproxy answered the SYN itself), or PF_DROP on
 * failure, with all partially-acquired resources released via csfailed.
 * *rewrite is set when the packet header was modified (seq modulation).
 */
static __inline int
pf_create_state(struct pf_pdesc *pd, struct pf_rule *r, struct pf_rule *a,
    struct pf_rule *nr, struct pf_state_key **skw, struct pf_state_key **sks,
    int *rewrite, struct pf_state **sm, int tag, struct pf_rule_slist *rules,
    struct pf_rule_actions *act, struct pf_src_node *sns[PF_SN_MAX])
{
	struct pf_state	*s = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	u_int16_t	 mss = tcp_mssdflt;
	u_short		 reason;
	u_int		 i;

	s = pool_get(&pf_state_pl, PR_NOWAIT | PR_ZERO);
	if (s == NULL) {
		REASON_SET(&reason, PFRES_MEMORY);
		goto csfailed;
	}
	/* Record which rules created this state. */
	s->rule.ptr = r;
	s->anchor.ptr = a;
	s->natrule.ptr = nr;
	if (r->allow_opts)
		s->state_flags |= PFSTATE_ALLOWOPTS;
	if (r->rule_flag & PFRULE_STATESLOPPY)
		s->state_flags |= PFSTATE_SLOPPY;
	if (r->rule_flag & PFRULE_PFLOW)
		s->state_flags |= PFSTATE_PFLOW;
#if NPFLOG > 0
	s->log = act->log & PF_LOG_ALL;
#endif /* NPFLOG > 0 */
	/* Copy the per-rule actions (queueing, ToS, MSS clamp, ...). */
	s->qid = act->qid;
	s->pqid = act->pqid;
	s->rtableid[pd->didx] = act->rtableid;
	s->rtableid[pd->sidx] = -1;	/* return traffic is routed normally */
	s->min_ttl = act->min_ttl;
	s->set_tos = act->set_tos;
	s->max_mss = act->max_mss;
	s->state_flags |= act->flags;
#if NPFSYNC > 0
	s->sync_state = PFSYNC_S_NONE;
#endif /* NPFSYNC > 0 */
	s->set_prio[0] = act->set_prio[0];
	s->set_prio[1] = act->set_prio[1];
	SLIST_INIT(&s->src_nodes);

	/* Protocol-specific peer state and first-packet timeout. */
	switch (pd->proto) {
	case IPPROTO_TCP:
		s->src.seqlo = ntohl(th->th_seq);
		s->src.seqhi = s->src.seqlo + pd->p_len + 1;
		if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
		    r->keep_state == PF_STATE_MODULATE) {
			/* Generate sequence number modulator */
			if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
			    0)
				s->src.seqdiff = 1;
			pf_change_a(pd, &th->th_seq,
			    htonl(s->src.seqlo + s->src.seqdiff));
			*rewrite = 1;
		} else
			s->src.seqdiff = 0;
		if (th->th_flags & TH_SYN) {
			s->src.seqhi++;
			s->src.wscale = pf_get_wscale(pd);
		}
		s->src.max_win = MAX(ntohs(th->th_win), 1);
		if (s->src.wscale & PF_WSCALE_MASK) {
			/* Remove scale factor from initial window */
			int win = s->src.max_win;
			win += 1 << (s->src.wscale & PF_WSCALE_MASK);
			s->src.max_win = (win - 1) >>
			    (s->src.wscale & PF_WSCALE_MASK);
		}
		if (th->th_flags & TH_FIN)
			s->src.seqhi++;
		s->dst.seqhi = 1;
		s->dst.max_win = 1;
		s->src.state = TCPS_SYN_SENT;
		s->dst.state = TCPS_CLOSED;
		s->timeout = PFTM_TCP_FIRST_PACKET;
		break;
	case IPPROTO_UDP:
		s->src.state = PFUDPS_SINGLE;
		s->dst.state = PFUDPS_NO_TRAFFIC;
		s->timeout = PFTM_UDP_FIRST_PACKET;
		break;
	case IPPROTO_ICMP:
#ifdef INET6
	case IPPROTO_ICMPV6:
#endif /* INET6 */
		s->timeout = PFTM_ICMP_FIRST_PACKET;
		break;
	default:
		s->src.state = PFOTHERS_SINGLE;
		s->dst.state = PFOTHERS_NO_TRAFFIC;
		s->timeout = PFTM_OTHER_FIRST_PACKET;
	}

	s->creation = time_uptime;
	s->expire = time_uptime;

	if (pd->proto == IPPROTO_TCP) {
		if (s->state_flags & PFSTATE_SCRUB_TCP &&
		    pf_normalize_tcp_init(pd, &s->src)) {
			REASON_SET(&reason, PFRES_MEMORY);
			goto csfailed;
		}
		if (s->state_flags & PFSTATE_SCRUB_TCP && s->src.scrub &&
		    pf_normalize_tcp_stateful(pd, &reason, s, &s->src, &s->dst,
		    rewrite)) {
			/* This really shouldn't happen!!! */
			DPFPRINTF(LOG_ERR,
			    "pf_normalize_tcp_stateful failed on first pkt");
			goto csfailed;
		}
	}
	s->direction = pd->dir;

	if (pf_state_key_setup(pd, skw, sks, act->rtableid)) {
		REASON_SET(&reason, PFRES_MEMORY);
		goto csfailed;
	}

	/* Link the state to every source node that tracked this packet. */
	for (i = 0; i < PF_SN_MAX; i++)
		if (sns[i] != NULL) {
			struct pf_sn_item	*sni;

			sni = pool_get(&pf_sn_item_pl, PR_NOWAIT);
			if (sni == NULL) {
				REASON_SET(&reason, PFRES_MEMORY);
				goto csfailed;
			}
			sni->sn = sns[i];
			SLIST_INSERT_HEAD(&s->src_nodes, sni, next);
			sni->sn->states++;
		}

	if (pf_set_rt_ifp(s, pd->src, (*skw)->af) != 0) {
		REASON_SET(&reason, PFRES_NOROUTE);
		goto csfailed;
	}

	if (pf_state_insert(BOUND_IFACE(r, pd->kif), skw, sks, s)) {
		pf_detach_state(s);
		*sks = *skw = NULL;
		REASON_SET(&reason, PFRES_STATEINS);
		goto csfailed;
	} else
		*sm = s;

	/*
	 * Make state responsible for rules it binds here.
	 */
	memcpy(&s->match_rules, rules, sizeof(s->match_rules));
	bzero(rules, sizeof(*rules));
	STATE_INC_COUNTERS(s);

	if (tag > 0) {
		pf_tag_ref(tag);
		s->tag = tag;
	}
	/* Synproxy: answer the client's SYN ourselves, drop the packet. */
	if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
	    TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
		int rtid = pd->rdomain;
		if (act->rtableid >= 0)
			rtid = act->rtableid;
		s->src.state = PF_TCPS_PROXY_SRC;
		s->src.seqhi = htonl(arc4random());
		/* Find mss option */
		mss = pf_get_mss(pd);
		mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
		mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
		s->src.mss = mss;
		pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
		    th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
		    TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, pd->rdomain);
		REASON_SET(&reason, PFRES_SYNPROXY);
		return (PF_SYNPROXY_DROP);
	}

	return (PF_PASS);

csfailed:
	/* Undo everything acquired so far; sns[] nodes are ours to remove. */
	if (s) {
		pf_normalize_tcp_cleanup(s);	/* safe even w/o init */
		pf_src_tree_remove_state(s);
		pool_put(&pf_state_pl, s);
	}

	for (i = 0; i < PF_SN_MAX; i++)
		if (sns[i] != NULL)
			pf_remove_src_node(sns[i]);

	return (PF_DROP);
}
  3319. int
  3320. pf_translate(struct pf_pdesc *pd, struct pf_addr *saddr, u_int16_t sport,
  3321. struct pf_addr *daddr, u_int16_t dport, u_int16_t virtual_type,
  3322. int icmp_dir)
  3323. {
  3324. /*
  3325. * when called from bpf_mtap_pflog, there are extra constraints:
  3326. * -mbuf is faked, m_data is the bpf buffer
  3327. * -pd is not fully set up
  3328. */
  3329. int rewrite = 0;
  3330. int afto = pd->af != pd->naf;
  3331. if (afto || PF_ANEQ(daddr, pd->dst, pd->af))
  3332. pd->destchg = 1;
  3333. switch (pd->proto) {
  3334. case IPPROTO_TCP:
  3335. if (afto || PF_ANEQ(saddr, pd->src, pd->af) ||
  3336. *pd->sport != sport) {
  3337. pf_change_ap(pd, pd->src, pd->sport, saddr, sport,
  3338. pd->naf);
  3339. rewrite = 1;
  3340. }
  3341. if (afto || PF_ANEQ(daddr, pd->dst, pd->af) ||
  3342. *pd->dport != dport) {
  3343. pf_change_ap(pd, pd->dst, pd->dport, daddr, dport,
  3344. pd->naf);
  3345. rewrite = 1;
  3346. }
  3347. break;
  3348. case IPPROTO_UDP:
  3349. if (afto || PF_ANEQ(saddr, pd->src, pd->af) ||
  3350. *pd->sport != sport) {
  3351. pf_change_ap(pd, pd->src, pd->sport, saddr, sport,
  3352. pd->naf);
  3353. rewrite = 1;
  3354. }
  3355. if (afto || PF_ANEQ(daddr, pd->dst, pd->af) ||
  3356. *pd->dport != dport) {
  3357. pf_change_ap(pd, pd->dst, pd->dport, daddr, dport,
  3358. pd->naf);
  3359. rewrite = 1;
  3360. }
  3361. break;
  3362. case IPPROTO_ICMP:
  3363. /* pf_translate() is also used when logging invalid packets */
  3364. if (pd->af != AF_INET)
  3365. return (0);
  3366. if (afto) {
  3367. #ifdef INET6
  3368. if (pf_translate_icmp_af(AF_INET6, pd->hdr.icmp))
  3369. return (0);
  3370. pd->proto = IPPROTO_ICMPV6;
  3371. rewrite = 1;
  3372. #endif /* INET6 */
  3373. } else {
  3374. if (PF_ANEQ(saddr, pd->src, pd->af)) {
  3375. pf_change_a(pd, &pd->src->v4.s_addr,
  3376. saddr->v4.s_addr);
  3377. rewrite = 1;
  3378. }
  3379. if (PF_ANEQ(daddr, pd->dst, pd->af)) {
  3380. pf_change_a(pd, &pd->dst->v4.s_addr,
  3381. daddr->v4.s_addr);
  3382. rewrite = 1;
  3383. }
  3384. }
  3385. if (virtual_type == htons(ICMP_ECHO)) {
  3386. u_int16_t icmpid = (icmp_dir == PF_IN) ? sport : dport;
  3387. if (icmpid != pd->hdr.icmp->icmp_id) {
  3388. if (pd->csum_status == PF_CSUM_UNKNOWN)
  3389. pf_check_proto_cksum(pd, pd->off,
  3390. pd->tot_len - pd->off, pd->proto,
  3391. pd->af);
  3392. pd->hdr.icmp->icmp_id = icmpid;
  3393. rewrite = 1;
  3394. }
  3395. }
  3396. break;
  3397. #ifdef INET6
  3398. case IPPROTO_ICMPV6:
  3399. /* pf_translate() is also used when logging invalid packets */
  3400. if (pd->af != AF_INET6)
  3401. return (0);
  3402. if (afto) {
  3403. /* ip_sum will be recalculated in pf_translate_af */
  3404. if (pf_translate_icmp_af(AF_INET, pd->hdr.icmp6))
  3405. return (0);
  3406. pd->proto = IPPROTO_ICMP;
  3407. rewrite = 1;
  3408. } else {
  3409. if (PF_ANEQ(saddr, pd->src, pd->af)) {
  3410. pf_change_a6(pd, pd->src, saddr);
  3411. rewrite = 1;
  3412. }
  3413. if (PF_ANEQ(daddr, pd->dst, pd->af)) {
  3414. pf_change_a6(pd, pd->dst, daddr);
  3415. rewrite = 1;
  3416. }
  3417. }
  3418. if (virtual_type == htons(ICMP6_ECHO_REQUEST)) {
  3419. u_int16_t icmpid = (icmp_dir == PF_IN) ? sport : dport;
  3420. if (icmpid != pd->hdr.icmp6->icmp6_id) {
  3421. if (pd->csum_status == PF_CSUM_UNKNOWN)
  3422. pf_check_proto_cksum(pd, pd->off,
  3423. pd->tot_len - pd->off, pd->proto,
  3424. pd->af);
  3425. pd->hdr.icmp6->icmp6_id = icmpid;
  3426. rewrite = 1;
  3427. }
  3428. }
  3429. break;
  3430. #endif /* INET6 */
  3431. default:
  3432. switch (pd->af) {
  3433. case AF_INET:
  3434. if (!afto && PF_ANEQ(saddr, pd->src, pd->af)) {
  3435. pf_change_a(pd, &pd->src->v4.s_addr,
  3436. saddr->v4.s_addr);
  3437. rewrite = 1;
  3438. }
  3439. if (!afto && PF_ANEQ(daddr, pd->dst, pd->af)) {
  3440. pf_change_a(pd, &pd->dst->v4.s_addr,
  3441. daddr->v4.s_addr);
  3442. rewrite = 1;
  3443. }
  3444. break;
  3445. #ifdef INET6
  3446. case AF_INET6:
  3447. if (!afto && PF_ANEQ(saddr, pd->src, pd->af)) {
  3448. pf_change_a6(pd, pd->src, saddr);
  3449. rewrite = 1;
  3450. }
  3451. if (!afto && PF_ANEQ(daddr, pd->dst, pd->af)) {
  3452. pf_change_a6(pd, pd->dst, daddr);
  3453. rewrite = 1;
  3454. }
  3455. break;
  3456. #endif /* INET6 */
  3457. }
  3458. }
  3459. return (rewrite);
  3460. }
/*
 * Full stateful TCP sequence tracking, after Guido van Rooij's
 * algorithm.  src is the peer that sent this packet, dst the other
 * end, as selected by the caller.  Validates sequence and ack numbers
 * against the recorded windows, applies (and defers generation of)
 * sequence-number modulation, and advances peer states and the state
 * timeout.  Returns PF_PASS or PF_DROP; *reason is set on drop and
 * *copyback is set whenever the TCP header was rewritten in place.
 */
int
pf_tcp_track_full(struct pf_pdesc *pd, struct pf_state_peer *src,
    struct pf_state_peer *dst, struct pf_state **state, u_short *reason,
    int *copyback)
{
	struct tcphdr		*th = pd->hdr.tcp;
	u_int16_t		 win = ntohs(th->th_win);
	u_int32_t		 ack, end, data_end, seq, orig_seq;
	u_int8_t		 sws, dws;
	int			 ackskew;

	/* Window scaling only applies once both sides advertised it. */
	if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
		sws = src->wscale & PF_WSCALE_MASK;
		dws = dst->wscale & PF_WSCALE_MASK;
	} else
		sws = dws = 0;

	/*
	 * Sequence tracking algorithm from Guido van Rooij's paper:
	 *   http://www.madison-gurkha.com/publications/tcp_filtering/
	 *	tcp_filtering.ps
	 */

	orig_seq = seq = ntohl(th->th_seq);
	if (src->seqlo == 0) {
		/* First packet from this end. Set its state */

		if (((*state)->state_flags & PFSTATE_SCRUB_TCP || dst->scrub) &&
		    src->scrub == NULL) {
			if (pf_normalize_tcp_init(pd, src)) {
				REASON_SET(reason, PFRES_MEMORY);
				return (PF_DROP);
			}
		}

		/* Deferred generation of sequence number modulator */
		if (dst->seqdiff && !src->seqdiff) {
			/* use random iss for the TCP server */
			while ((src->seqdiff = arc4random() - seq) == 0)
				;
			ack = ntohl(th->th_ack) - dst->seqdiff;
			pf_change_a(pd, &th->th_seq, htonl(seq + src->seqdiff));
			pf_change_a(pd, &th->th_ack, htonl(ack));
			*copyback = 1;
		} else {
			ack = ntohl(th->th_ack);
		}

		/* end = highest sequence number consumed by this segment. */
		end = seq + pd->p_len;
		if (th->th_flags & TH_SYN) {
			end++;
			if (dst->wscale & PF_WSCALE_FLAG) {
				src->wscale = pf_get_wscale(pd);
				if (src->wscale & PF_WSCALE_FLAG) {
					/* Remove scale factor from initial
					 * window */
					sws = src->wscale & PF_WSCALE_MASK;
					win = ((u_int32_t)win + (1 << sws) - 1)
					    >> sws;
					dws = dst->wscale & PF_WSCALE_MASK;
				} else {
					/* fixup other window */
					dst->max_win = MIN(TCP_MAXWIN,
					    (u_int32_t)dst->max_win <<
					    (dst->wscale & PF_WSCALE_MASK));
					/* in case of a retrans SYN|ACK */
					dst->wscale = 0;
				}
			}
		}
		data_end = end;
		if (th->th_flags & TH_FIN)
			end++;

		src->seqlo = seq;
		if (src->state < TCPS_SYN_SENT)
			src->state = TCPS_SYN_SENT;

		/*
		 * May need to slide the window (seqhi may have been set by
		 * the crappy stack check or if we picked up the connection
		 * after establishment)
		 */
		if (src->seqhi == 1 ||
		    SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
			src->seqhi = end + MAX(1, dst->max_win << dws);
		if (win > src->max_win)
			src->max_win = win;

	} else {
		ack = ntohl(th->th_ack) - dst->seqdiff;
		if (src->seqdiff) {
			/* Modulate sequence numbers */
			pf_change_a(pd, &th->th_seq, htonl(seq + src->seqdiff));
			pf_change_a(pd, &th->th_ack, htonl(ack));
			*copyback = 1;
		}
		end = seq + pd->p_len;
		if (th->th_flags & TH_SYN)
			end++;
		data_end = end;
		if (th->th_flags & TH_FIN)
			end++;
	}

	if ((th->th_flags & TH_ACK) == 0) {
		/* Let it pass through the ack skew check */
		ack = dst->seqlo;
	} else if ((ack == 0 &&
	    (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
	    /* broken tcp stacks do not set ack */
	    (dst->state < TCPS_SYN_SENT)) {
		/*
		 * Many stacks (ours included) will set the ACK number in an
		 * FIN|ACK if the SYN times out -- no sequence to ACK.
		 */
		ack = dst->seqlo;
	}

	if (seq == end) {
		/* Ease sequencing restrictions on no data packets */
		seq = src->seqlo;
		data_end = end = seq;
	}

	ackskew = dst->seqlo - ack;

	/*
	 * Need to demodulate the sequence numbers in any TCP SACK options
	 * (Selective ACK). We could optionally validate the SACK values
	 * against the current ACK window, either forwards or backwards, but
	 * I'm not confident that SACK has been implemented properly
	 * everywhere. It wouldn't surprise me if several stacks accidently
	 * SACK too far backwards of previously ACKed data. There really aren't
	 * any security implications of bad SACKing unless the target stack
	 * doesn't validate the option length correctly. Someone trying to
	 * spoof into a TCP connection won't bother blindly sending SACK
	 * options anyway.
	 */
	if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
		if (pf_modulate_sack(pd, dst))
			*copyback = 1;
	}

#define MAXACKWINDOW (0xffff + 1500)	/* 1500 is an arbitrary fudge factor */
	if (SEQ_GEQ(src->seqhi, data_end) &&
	    /* Last octet inside other's window space */
	    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
	    /* Retrans: not more than one window back */
	    (ackskew >= -MAXACKWINDOW) &&
	    /* Acking not more than one reassembled fragment backwards */
	    (ackskew <= (MAXACKWINDOW << sws)) &&
	    /* Acking not more than one window forward */
	    ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
	    (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo))) {
		/* Require an exact/+1 sequence match on resets when possible */

		if (dst->scrub || src->scrub) {
			if (pf_normalize_tcp_stateful(pd, reason, *state, src,
			    dst, copyback))
				return (PF_DROP);
		}

		/* update max window */
		if (src->max_win < win)
			src->max_win = win;
		/* synchronize sequencing */
		if (SEQ_GT(end, src->seqlo))
			src->seqlo = end;
		/* slide the window of what the other end can send */
		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
			dst->seqhi = ack + MAX((win << sws), 1);

		/* update states */
		if (th->th_flags & TH_SYN)
			if (src->state < TCPS_SYN_SENT)
				src->state = TCPS_SYN_SENT;
		if (th->th_flags & TH_FIN)
			if (src->state < TCPS_CLOSING)
				src->state = TCPS_CLOSING;
		if (th->th_flags & TH_ACK) {
			if (dst->state == TCPS_SYN_SENT) {
				dst->state = TCPS_ESTABLISHED;
				if (src->state == TCPS_ESTABLISHED &&
				    !SLIST_EMPTY(&(*state)->src_nodes) &&
				    pf_src_connlimit(state)) {
					REASON_SET(reason, PFRES_SRCLIMIT);
					return (PF_DROP);
				}
			} else if (dst->state == TCPS_CLOSING)
				dst->state = TCPS_FIN_WAIT_2;
		}
		if (th->th_flags & TH_RST)
			src->state = dst->state = TCPS_TIME_WAIT;

		/* update expire time */
		(*state)->expire = time_uptime;
		if (src->state >= TCPS_FIN_WAIT_2 &&
		    dst->state >= TCPS_FIN_WAIT_2)
			(*state)->timeout = PFTM_TCP_CLOSED;
		else if (src->state >= TCPS_CLOSING &&
		    dst->state >= TCPS_CLOSING)
			(*state)->timeout = PFTM_TCP_FIN_WAIT;
		else if (src->state < TCPS_ESTABLISHED ||
		    dst->state < TCPS_ESTABLISHED)
			(*state)->timeout = PFTM_TCP_OPENING;
		else if (src->state >= TCPS_CLOSING ||
		    dst->state >= TCPS_CLOSING)
			(*state)->timeout = PFTM_TCP_CLOSING;
		else
			(*state)->timeout = PFTM_TCP_ESTABLISHED;

		/* Fall through to PASS packet */
	} else if ((dst->state < TCPS_SYN_SENT ||
	    dst->state >= TCPS_FIN_WAIT_2 ||
	    src->state >= TCPS_FIN_WAIT_2) &&
	    SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end) &&
	    /* Within a window forward of the originating packet */
	    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
	    /* Within a window backward of the originating packet */

		/*
		 * This currently handles three situations:
		 *  1) Stupid stacks will shotgun SYNs before their peer
		 *     replies.
		 *  2) When PF catches an already established stream (the
		 *     firewall rebooted, the state table was flushed, routes
		 *     changed...)
		 *  3) Packets get funky immediately after the connection
		 *     closes (this should catch Solaris spurious ACK|FINs
		 *     that web servers like to spew after a close)
		 *
		 * This must be a little more careful than the above code
		 * since packet floods will also be caught here. We don't
		 * update the TTL here to mitigate the damage of a packet
		 * flood and so the same code can handle awkward establishment
		 * and a loosened connection close.
		 * In the establishment case, a correct peer response will
		 * validate the connection, go through the normal state code
		 * and keep updating the state TTL.
		 */

		if (pf_status.debug >= LOG_NOTICE) {
			log(LOG_NOTICE, "pf: loose state match: ");
			pf_print_state(*state);
			pf_print_flags(th->th_flags);
			addlog(" seq=%u (%u) ack=%u len=%u ackskew=%d "
			    "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
			    pd->p_len, ackskew, (*state)->packets[0],
			    (*state)->packets[1],
			    pd->dir == PF_IN ? "in" : "out",
			    pd->dir == (*state)->direction ? "fwd" : "rev");
		}

		if (dst->scrub || src->scrub) {
			if (pf_normalize_tcp_stateful(pd, reason, *state, src,
			    dst, copyback))
				return (PF_DROP);
		}

		/* update max window */
		if (src->max_win < win)
			src->max_win = win;
		/* synchronize sequencing */
		if (SEQ_GT(end, src->seqlo))
			src->seqlo = end;
		/* slide the window of what the other end can send */
		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
			dst->seqhi = ack + MAX((win << sws), 1);

		/*
		 * Cannot set dst->seqhi here since this could be a shotgunned
		 * SYN and not an already established connection.
		 */

		if (th->th_flags & TH_FIN)
			if (src->state < TCPS_CLOSING)
				src->state = TCPS_CLOSING;
		if (th->th_flags & TH_RST)
			src->state = dst->state = TCPS_TIME_WAIT;

		/* Fall through to PASS packet */
	} else {
		if ((*state)->dst.state == TCPS_SYN_SENT &&
		    (*state)->src.state == TCPS_SYN_SENT) {
			/* Send RST for state mismatches during handshake */
			if (!(th->th_flags & TH_RST))
				pf_send_tcp((*state)->rule.ptr, pd->af,
				    pd->dst, pd->src, th->th_dport,
				    th->th_sport, ntohl(th->th_ack), 0,
				    TH_RST, 0, 0,
				    (*state)->rule.ptr->return_ttl, 1, 0,
				    pd->rdomain);
			src->seqlo = 0;
			src->seqhi = 1;
			src->max_win = 1;
		} else if (pf_status.debug >= LOG_NOTICE) {
			log(LOG_NOTICE, "pf: BAD state: ");
			pf_print_state(*state);
			pf_print_flags(th->th_flags);
			addlog(" seq=%u (%u) ack=%u len=%u ackskew=%d "
			    "pkts=%llu:%llu dir=%s,%s\n",
			    seq, orig_seq, ack, pd->p_len, ackskew,
			    (*state)->packets[0], (*state)->packets[1],
			    pd->dir == PF_IN ? "in" : "out",
			    pd->dir == (*state)->direction ? "fwd" : "rev");
			addlog("pf: State failure on: %c %c %c %c | %c %c\n",
			    SEQ_GEQ(src->seqhi, data_end) ? ' ' : '1',
			    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
			    ' ': '2',
			    (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
			    (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
			    SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end) ?
			    ' ' :'5',
			    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
		}
		REASON_SET(reason, PFRES_BADSTATE);
		return (PF_DROP);
	}

	return (PF_PASS);
}
  3756. int
  3757. pf_tcp_track_sloppy(struct pf_pdesc *pd, struct pf_state_peer *src,
  3758. struct pf_state_peer *dst, struct pf_state **state, u_short *reason)
  3759. {
  3760. struct tcphdr *th = pd->hdr.tcp;
  3761. if (th->th_flags & TH_SYN)
  3762. if (src->state < TCPS_SYN_SENT)
  3763. src->state = TCPS_SYN_SENT;
  3764. if (th->th_flags & TH_FIN)
  3765. if (src->state < TCPS_CLOSING)
  3766. src->state = TCPS_CLOSING;
  3767. if (th->th_flags & TH_ACK) {
  3768. if (dst->state == TCPS_SYN_SENT) {
  3769. dst->state = TCPS_ESTABLISHED;
  3770. if (src->state == TCPS_ESTABLISHED &&
  3771. !SLIST_EMPTY(&(*state)->src_nodes) &&
  3772. pf_src_connlimit(state)) {
  3773. REASON_SET(reason, PFRES_SRCLIMIT);
  3774. return (PF_DROP);
  3775. }
  3776. } else if (dst->state == TCPS_CLOSING) {
  3777. dst->state = TCPS_FIN_WAIT_2;
  3778. } else if (src->state == TCPS_SYN_SENT &&
  3779. dst->state < TCPS_SYN_SENT) {
  3780. /*
  3781. * Handle a special sloppy case where we only see one
  3782. * half of the connection. If there is a ACK after
  3783. * the initial SYN without ever seeing a packet from
  3784. * the destination, set the connection to established.
  3785. */
  3786. dst->state = src->state = TCPS_ESTABLISHED;
  3787. if (!SLIST_EMPTY(&(*state)->src_nodes) &&
  3788. pf_src_connlimit(state)) {
  3789. REASON_SET(reason, PFRES_SRCLIMIT);
  3790. return (PF_DROP);
  3791. }
  3792. } else if (src->state == TCPS_CLOSING &&
  3793. dst->state == TCPS_ESTABLISHED &&
  3794. dst->seqlo == 0) {
  3795. /*
  3796. * Handle the closing of half connections where we
  3797. * don't see the full bidirectional FIN/ACK+ACK
  3798. * handshake.
  3799. */
  3800. dst->state = TCPS_CLOSING;
  3801. }
  3802. }
  3803. if (th->th_flags & TH_RST)
  3804. src->state = dst->state = TCPS_TIME_WAIT;
  3805. /* update expire time */
  3806. (*state)->expire = time_uptime;
  3807. if (src->state >= TCPS_FIN_WAIT_2 &&
  3808. dst->state >= TCPS_FIN_WAIT_2)
  3809. (*state)->timeout = PFTM_TCP_CLOSED;
  3810. else if (src->state >= TCPS_CLOSING &&
  3811. dst->state >= TCPS_CLOSING)
  3812. (*state)->timeout = PFTM_TCP_FIN_WAIT;
  3813. else if (src->state < TCPS_ESTABLISHED ||
  3814. dst->state < TCPS_ESTABLISHED)
  3815. (*state)->timeout = PFTM_TCP_OPENING;
  3816. else if (src->state >= TCPS_CLOSING ||
  3817. dst->state >= TCPS_CLOSING)
  3818. (*state)->timeout = PFTM_TCP_CLOSING;
  3819. else
  3820. (*state)->timeout = PFTM_TCP_ESTABLISHED;
  3821. return (PF_PASS);
  3822. }
/*
 * TCP SYN proxy handling.  pf first completes the three-way handshake
 * with the client itself (src.state == PF_TCPS_PROXY_SRC), then opens
 * a connection to the real destination (PF_TCPS_PROXY_DST) and splices
 * the two halves together by recording the seqdiff between the ISNs.
 * Returns PF_SYNPROXY_DROP while pf is answering on behalf of a peer,
 * PF_DROP for packets that do not fit the expected handshake, and
 * PF_PASS once (or when) the state needs no proxying.
 */
static __inline int
pf_synproxy(struct pf_pdesc *pd, struct pf_state **state, u_short *reason)
{
	struct pf_state_key	*sk = (*state)->key[pd->didx];

	if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
		struct tcphdr	*th = pd->hdr.tcp;

		/* Only the client side may talk during this phase. */
		if (pd->dir != (*state)->direction) {
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		}

		if (th->th_flags & TH_SYN) {
			/* Retransmitted SYN: must match the recorded ISN. */
			if (ntohl(th->th_seq) != (*state)->src.seqlo) {
				REASON_SET(reason, PFRES_SYNPROXY);
				return (PF_DROP);
			}
			/* Re-send our SYN|ACK with the proxy ISN. */
			pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
			    pd->src, th->th_dport, th->th_sport,
			    (*state)->src.seqhi, ntohl(th->th_seq) + 1,
			    TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
			    0, pd->rdomain);
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		} else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
		    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
		    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
			/* Final ACK of the client handshake must be exact. */
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_DROP);
		} else if (!SLIST_EMPTY(&(*state)->src_nodes) &&
		    pf_src_connlimit(state)) {
			REASON_SET(reason, PFRES_SRCLIMIT);
			return (PF_DROP);
		} else
			/* Client handshake done; connect to the server. */
			(*state)->src.state = PF_TCPS_PROXY_DST;
	}
	if ((*state)->src.state == PF_TCPS_PROXY_DST) {
		struct tcphdr	*th = pd->hdr.tcp;

		if (pd->dir == (*state)->direction) {
			/* Client side: only the exact handshake ACK passes. */
			if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
			    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
			    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
				REASON_SET(reason, PFRES_SYNPROXY);
				return (PF_DROP);
			}
			(*state)->src.max_win = MAX(ntohs(th->th_win), 1);
			if ((*state)->dst.seqhi == 1)
				(*state)->dst.seqhi = htonl(arc4random());
			/* Send our own SYN to the real destination. */
			pf_send_tcp((*state)->rule.ptr, pd->af,
			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
			    sk->port[pd->sidx], sk->port[pd->didx],
			    (*state)->dst.seqhi, 0, TH_SYN, 0,
			    (*state)->src.mss, 0, 0, (*state)->tag,
			    sk->rdomain);
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
		    (TH_SYN|TH_ACK)) ||
		    (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
			/* Server must answer our SYN with a matching SYN|ACK. */
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_DROP);
		} else {
			(*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
			(*state)->dst.seqlo = ntohl(th->th_seq);
			/* ACK the client's data... */
			pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
			    pd->src, th->th_dport, th->th_sport,
			    ntohl(th->th_ack), ntohl(th->th_seq) + 1,
			    TH_ACK, (*state)->src.max_win, 0, 0, 0,
			    (*state)->tag, pd->rdomain);
			/* ...and complete the handshake with the server. */
			pf_send_tcp((*state)->rule.ptr, pd->af,
			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
			    sk->port[pd->sidx], sk->port[pd->didx],
			    (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
			    TH_ACK, (*state)->dst.max_win, 0, 0, 1,
			    0, sk->rdomain);
			/*
			 * Splice the two half-connections: record the
			 * ISN offsets so subsequent packets can be
			 * translated between the two sequence spaces.
			 */
			(*state)->src.seqdiff = (*state)->dst.seqhi -
			    (*state)->src.seqlo;
			(*state)->dst.seqdiff = (*state)->src.seqhi -
			    (*state)->dst.seqlo;
			(*state)->src.seqhi = (*state)->src.seqlo +
			    (*state)->dst.max_win;
			(*state)->dst.seqhi = (*state)->dst.seqlo +
			    (*state)->src.max_win;
			(*state)->src.wscale = (*state)->dst.wscale = 0;
			(*state)->src.state = (*state)->dst.state =
			    TCPS_ESTABLISHED;
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		}
	}

	return (PF_PASS);
}
/*
 * pf_test_state: find the state entry matching this packet and run
 * per-protocol (TCP/UDP/other) state tracking on it.  Returns PF_PASS,
 * PF_DROP, a synproxy verdict from pf_synproxy(), or PF_AFRT when an
 * address-family translation reroute is required.  May rewrite packet
 * headers in place (NAT) and copy them back into the mbuf.
 */
int
pf_test_state(struct pf_pdesc *pd, struct pf_state **state, u_short *reason)
{
	struct pf_state_key_cmp	 key;
	int			 copyback = 0;
	struct pf_state_peer	*src, *dst;
	int			 action = PF_PASS;
	struct inpcb		*inp;

	/* Build the lookup key from the packet descriptor. */
	key.af = pd->af;
	key.proto = pd->virtual_proto;
	key.rdomain = pd->rdomain;
	PF_ACPY(&key.addr[pd->sidx], pd->src, key.af);
	PF_ACPY(&key.addr[pd->didx], pd->dst, key.af);
	key.port[pd->sidx] = pd->osport;
	key.port[pd->didx] = pd->odport;
	inp = pd->m->m_pkthdr.pf.inp;

	/* NOTE(review): STATE_LOOKUP is a macro; presumably it returns from
	 * this function when no matching state exists — see its definition. */
	STATE_LOOKUP(pd->kif, &key, pd->dir, *state, pd->m);

	/* Map the src/dst peer views according to the packet's direction. */
	if (pd->dir == (*state)->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	switch (pd->virtual_proto) {
	case IPPROTO_TCP:
		if ((action = pf_synproxy(pd, state, reason)) != PF_PASS)
			return (action);
		/*
		 * A fresh SYN on a connection where both peers are past
		 * FIN_WAIT_2: tear down the stale state so the packet can
		 * re-match a rule and create a new one.
		 */
		if (((pd->hdr.tcp->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
		    dst->state >= TCPS_FIN_WAIT_2 &&
		    src->state >= TCPS_FIN_WAIT_2) {
			if (pf_status.debug >= LOG_NOTICE) {
				log(LOG_NOTICE, "pf: state reuse ");
				pf_print_state(*state);
				pf_print_flags(pd->hdr.tcp->th_flags);
				addlog("\n");
			}
			/* XXX make sure it's the same direction ?? */
			(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
			pf_unlink_state(*state);
			*state = NULL;
			/* restore the saved inp for the retried lookup */
			pd->m->m_pkthdr.pf.inp = inp;
			return (PF_DROP);
		}
		if ((*state)->state_flags & PFSTATE_SLOPPY) {
			if (pf_tcp_track_sloppy(pd, src, dst, state, reason) ==
			    PF_DROP)
				return (PF_DROP);
		} else {
			int	 ret;

			/* reversed key: swap the peers for full tracking */
			if (PF_REVERSED_KEY((*state)->key, pd->af))
				ret = pf_tcp_track_full(pd, dst, src, state,
				    reason, &copyback);
			else
				ret = pf_tcp_track_full(pd, src, dst, state,
				    reason, &copyback);
			if (ret == PF_DROP)
				return (PF_DROP);
		}
		break;
	case IPPROTO_UDP:
		/* update states */
		if (src->state < PFUDPS_SINGLE)
			src->state = PFUDPS_SINGLE;
		if (dst->state == PFUDPS_SINGLE)
			dst->state = PFUDPS_MULTIPLE;

		/* update expire time */
		(*state)->expire = time_uptime;
		if (src->state == PFUDPS_MULTIPLE &&
		    dst->state == PFUDPS_MULTIPLE)
			(*state)->timeout = PFTM_UDP_MULTIPLE;
		else
			(*state)->timeout = PFTM_UDP_SINGLE;
		break;
	default:
		/* update states */
		if (src->state < PFOTHERS_SINGLE)
			src->state = PFOTHERS_SINGLE;
		if (dst->state == PFOTHERS_SINGLE)
			dst->state = PFOTHERS_MULTIPLE;

		/* update expire time */
		(*state)->expire = time_uptime;
		if (src->state == PFOTHERS_MULTIPLE &&
		    dst->state == PFOTHERS_MULTIPLE)
			(*state)->timeout = PFTM_OTHER_MULTIPLE;
		else
			(*state)->timeout = PFTM_OTHER_SINGLE;
		break;
	}

	/* translate source/destination address, if necessary */
	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
		struct pf_state_key	*nk;
		int			 afto, sidx, didx;

		if (PF_REVERSED_KEY((*state)->key, pd->af))
			nk = (*state)->key[pd->sidx];
		else
			nk = (*state)->key[pd->didx];

		afto = pd->af != nk->af;
		/* on af translation the side indices swap */
		sidx = afto ? pd->didx : pd->sidx;
		didx = afto ? pd->sidx : pd->didx;

		if (afto || PF_ANEQ(pd->src, &nk->addr[sidx], pd->af) ||
		    nk->port[sidx] != pd->osport)
			pf_change_ap(pd, pd->src, pd->sport,
			    &nk->addr[sidx], nk->port[sidx], nk->af);

		if (afto || PF_ANEQ(pd->dst, &nk->addr[didx], pd->af) ||
		    pd->rdomain != nk->rdomain)
			pd->destchg = 1;

		if (afto || PF_ANEQ(pd->dst, &nk->addr[didx], pd->af) ||
		    nk->port[didx] != pd->odport)
			pf_change_ap(pd, pd->dst, pd->dport,
			    &nk->addr[didx], nk->port[didx], nk->af);

#ifdef INET6
		if (afto) {
			/* af translation: record new addrs, reroute packet */
			PF_ACPY(&pd->nsaddr, &nk->addr[sidx], nk->af);
			PF_ACPY(&pd->ndaddr, &nk->addr[didx], nk->af);
			pd->naf = nk->af;
			action = PF_AFRT;
		}
#endif /* INET6 */

		pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
		copyback = 1;
	}

	/* copy the (possibly rewritten) header back into the mbuf */
	if (copyback && pd->hdrlen > 0) {
		pf_cksum(pd, pd->m);
		m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any, M_NOWAIT);
	}

	return (action);
}
/*
 * pf_icmp_state_lookup: build a state key for an ICMP query/reply and
 * look up the matching state.  Returns -1 when a state was found (the
 * caller continues with *state set), or PF_DROP on lookup failure or
 * when the message flows in the wrong direction for the state.  *iidx
 * receives the key side that carries the ICMP id.
 */
int
pf_icmp_state_lookup(struct pf_pdesc *pd, struct pf_state_key_cmp *key,
    struct pf_state **state, u_int16_t icmpid, u_int16_t type,
    int icmp_dir, int *iidx, int multi, int inner)
{
	int direction;

	key->af = pd->af;
	key->proto = pd->proto;
	key->rdomain = pd->rdomain;
	/* select which port slot holds the ICMP id vs. the virtual type */
	if (icmp_dir == PF_IN) {
		*iidx = pd->sidx;
		key->port[pd->sidx] = icmpid;
		key->port[pd->didx] = type;
	} else {
		*iidx = pd->didx;
		key->port[pd->sidx] = type;
		key->port[pd->didx] = icmpid;
	}

	if (pf_state_key_addr_setup(pd, key, pd->sidx, pd->src, pd->didx,
	    pd->dst, pd->af, multi))
		return (PF_DROP);

	/* NOTE(review): macro presumably returns on lookup failure */
	STATE_LOOKUP(pd->kif, key, pd->dir, *state, pd->m);

	/* sloppy states skip the direction sanity check below */
	if ((*state)->state_flags & PFSTATE_SLOPPY)
		return (-1);

	/* Is this ICMP message flowing in right direction? */
	if ((*state)->key[PF_SK_WIRE]->af != (*state)->key[PF_SK_STACK]->af)
		direction = (pd->af == (*state)->key[PF_SK_WIRE]->af) ?
		    PF_IN : PF_OUT;
	else
		direction = (*state)->direction;
	/* inner (quoted) packets flow opposite to the outer message */
	if ((((!inner && direction == pd->dir) ||
	    (inner && direction != pd->dir)) ?
	    PF_IN : PF_OUT) != icmp_dir) {
		if (pf_status.debug >= LOG_NOTICE) {
			log(LOG_NOTICE,
			    "pf: icmp type %d in wrong direction (%d): ",
			    ntohs(type), icmp_dir);
			pf_print_state(*state);
			addlog("\n");
		}
		return (PF_DROP);
	}
	return (-1);
}
/*
 * pf_test_state_icmp: state tracking for ICMP/ICMPv6 packets.  Plain
 * query/reply messages are matched against their own ICMP state; error
 * messages are matched against the state of the embedded (quoted)
 * packet that triggered them, and both the outer ICMP header and the
 * quoted inner headers are rewritten when the state carries a NAT key
 * pair.  Returns PF_PASS, PF_DROP, or PF_AFRT (af translation reroute).
 */
int
pf_test_state_icmp(struct pf_pdesc *pd, struct pf_state **state,
    u_short *reason)
{
	struct pf_addr	*saddr = pd->src, *daddr = pd->dst;
	u_int16_t	 virtual_id, virtual_type;
	u_int8_t	 icmptype;
	int		 icmp_dir, iidx, ret, copyback = 0;

	struct pf_state_key_cmp key;

	switch (pd->proto) {
	case IPPROTO_ICMP:
		icmptype = pd->hdr.icmp->icmp_type;
		break;
#ifdef INET6
	case IPPROTO_ICMPV6:
		icmptype = pd->hdr.icmp6->icmp6_type;
		break;
#endif /* INET6 */
	default:
		panic("unhandled proto %d", pd->proto);
	}

	if (pf_icmp_mapping(pd, icmptype, &icmp_dir, &virtual_id,
	    &virtual_type) == 0) {
		/*
		 * ICMP query/reply message not related to a TCP/UDP packet.
		 * Search for an ICMP state.
		 */
		ret = pf_icmp_state_lookup(pd, &key, state,
		    virtual_id, virtual_type, icmp_dir, &iidx,
		    0, 0);
		/* IPv6? try matching a multicast address */
		if (ret == PF_DROP && pd->af == AF_INET6 && icmp_dir == PF_OUT)
			ret = pf_icmp_state_lookup(pd, &key, state, virtual_id,
			    virtual_type, icmp_dir, &iidx, 1, 0);
		if (ret >= 0)
			return (ret);

		(*state)->expire = time_uptime;
		(*state)->timeout = PFTM_ICMP_ERROR_REPLY;

		/* translate source/destination address, if necessary */
		if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
			struct pf_state_key	*nk;
			int			 afto, sidx, didx;

			if (PF_REVERSED_KEY((*state)->key, pd->af))
				nk = (*state)->key[pd->sidx];
			else
				nk = (*state)->key[pd->didx];

			afto = pd->af != nk->af;
			/* on af translation the side indices swap */
			sidx = afto ? pd->didx : pd->sidx;
			didx = afto ? pd->sidx : pd->didx;
			iidx = afto ? !iidx : iidx;

			if (pd->rdomain != nk->rdomain)
				pd->destchg = 1;
			pd->m->m_pkthdr.ph_rtableid = nk->rdomain;

			switch (pd->af) {
			case AF_INET:
#ifdef INET6
				if (afto) {
					if (pf_translate_icmp_af(AF_INET6,
					    pd->hdr.icmp))
						return (PF_DROP);
					pd->proto = IPPROTO_ICMPV6;
				}
#endif /* INET6 */
				if (!afto && PF_ANEQ(pd->src,
				    &nk->addr[sidx], AF_INET))
					pf_change_a(pd, &saddr->v4.s_addr,
					    nk->addr[sidx].v4.s_addr);
				if (!afto && PF_ANEQ(pd->dst,
				    &nk->addr[didx], AF_INET)) {
					pf_change_a(pd, &daddr->v4.s_addr,
					    nk->addr[didx].v4.s_addr);
					pd->destchg = 1;
				}
				/* rewrite the ICMP id if NAT changed it */
				if (nk->port[iidx] != pd->hdr.icmp->icmp_id) {
					if (pd->csum_status == PF_CSUM_UNKNOWN)
						pf_check_proto_cksum(pd,
						    pd->off, pd->tot_len -
						    pd->off, pd->proto, pd->af);
					pd->hdr.icmp->icmp_id = nk->port[iidx];
				}
				m_copyback(pd->m, pd->off, ICMP_MINLEN,
				    pd->hdr.icmp, M_NOWAIT);
				copyback = 1;
				break;
#ifdef INET6
			case AF_INET6:
				if (afto) {
					if (pf_translate_icmp_af(AF_INET,
					    pd->hdr.icmp6))
						return (PF_DROP);
					pd->proto = IPPROTO_ICMP;
				}
				if (!afto && PF_ANEQ(pd->src,
				    &nk->addr[sidx], AF_INET6))
					pf_change_a6(pd, saddr,
					    &nk->addr[sidx]);
				if (!afto && PF_ANEQ(pd->dst,
				    &nk->addr[didx], AF_INET6)) {
					pf_change_a6(pd, daddr,
					    &nk->addr[didx]);
					pd->destchg = 1;
				}
				/* rewrite the ICMPv6 id if NAT changed it */
				if (nk->port[iidx] != pd->hdr.icmp6->icmp6_id) {
					if (pd->csum_status == PF_CSUM_UNKNOWN)
						pf_check_proto_cksum(pd,
						    pd->off, pd->tot_len -
						    pd->off, pd->proto, pd->af);
					pd->hdr.icmp6->icmp6_id =
					    nk->port[iidx];
				}
				m_copyback(pd->m, pd->off,
				    sizeof(struct icmp6_hdr), pd->hdr.icmp6,
				    M_NOWAIT);
				copyback = 1;
				break;
#endif /* INET6 */
			}
#ifdef INET6
			if (afto) {
				PF_ACPY(&pd->nsaddr, &nk->addr[sidx], nk->af);
				PF_ACPY(&pd->ndaddr, &nk->addr[didx], nk->af);
				pd->naf = nk->af;
				return (PF_AFRT);
			}
#endif /* INET6 */
		}
	} else {
		/*
		 * ICMP error message in response to a TCP/UDP packet.
		 * Extract the inner TCP/UDP header and search for that state.
		 */
		struct pf_pdesc	 pd2;
		struct ip	 h2;
#ifdef INET6
		struct ip6_hdr	 h2_6;
#endif /* INET6 */
		int		 ipoff2;

		/* Initialize pd2 fields valid for both packets with pd. */
		bzero(&pd2, sizeof(pd2));
		pd2.af = pd->af;
		pd2.dir = pd->dir;
		pd2.kif = pd->kif;
		pd2.m = pd->m;
		pd2.rdomain = pd->rdomain;
		/* Payload packet is from the opposite direction. */
		pd2.sidx = (pd2.dir == PF_IN) ? 1 : 0;
		pd2.didx = (pd2.dir == PF_IN) ? 0 : 1;

		switch (pd->af) {
		case AF_INET:
			/* offset of h2 in mbuf chain */
			ipoff2 = pd->off + ICMP_MINLEN;

			if (!pf_pull_hdr(pd2.m, ipoff2, &h2, sizeof(h2),
			    NULL, reason, pd2.af)) {
				DPFPRINTF(LOG_NOTICE,
				    "ICMP error message too short (ip)");
				return (PF_DROP);
			}
			/*
			 * ICMP error messages don't refer to non-first
			 * fragments
			 */
			if (h2.ip_off & htons(IP_OFFMASK)) {
				REASON_SET(reason, PFRES_FRAG);
				return (PF_DROP);
			}

			/* offset of protocol header that follows h2 */
			pd2.off = ipoff2 + (h2.ip_hl << 2);

			pd2.proto = h2.ip_p;
			pd2.tot_len = ntohs(h2.ip_len);
			pd2.src = (struct pf_addr *)&h2.ip_src;
			pd2.dst = (struct pf_addr *)&h2.ip_dst;
			break;
#ifdef INET6
		case AF_INET6:
			ipoff2 = pd->off + sizeof(struct icmp6_hdr);

			if (!pf_pull_hdr(pd2.m, ipoff2, &h2_6, sizeof(h2_6),
			    NULL, reason, pd2.af)) {
				DPFPRINTF(LOG_NOTICE,
				    "ICMP error message too short (ip6)");
				return (PF_DROP);
			}

			pd2.off = ipoff2;
			/* walk extension headers to the upper-layer proto */
			if (pf_walk_header6(&pd2, &h2_6, reason) != PF_PASS)
				return (PF_DROP);

			pd2.tot_len = ntohs(h2_6.ip6_plen) +
			    sizeof(struct ip6_hdr);
			pd2.src = (struct pf_addr *)&h2_6.ip6_src;
			pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
			break;
#endif /* INET6 */
		default:
			unhandled_af(pd->af);
		}

		switch (pd2.proto) {
		case IPPROTO_TCP: {
			struct tcphdr		 th;
			u_int32_t		 seq;
			struct pf_state_peer	*src, *dst;
			u_int8_t		 dws;

			/*
			 * Only the first 8 bytes of the TCP header can be
			 * expected. Don't access any TCP header fields after
			 * th_seq, an ackskew test is not possible.
			 */
			if (!pf_pull_hdr(pd2.m, pd2.off, &th, 8, NULL, reason,
			    pd2.af)) {
				DPFPRINTF(LOG_NOTICE,
				    "ICMP error message too short (tcp)");
				return (PF_DROP);
			}

			key.af = pd2.af;
			key.proto = IPPROTO_TCP;
			key.rdomain = pd2.rdomain;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[pd2.sidx] = th.th_sport;
			key.port[pd2.didx] = th.th_dport;

			STATE_LOOKUP(pd2.kif, &key, pd2.dir, *state, pd2.m);

			/* pick peers by direction, accounting for key order */
			if (pd2.dir == (*state)->direction) {
				if (PF_REVERSED_KEY((*state)->key, pd->af)) {
					src = &(*state)->src;
					dst = &(*state)->dst;
				} else {
					src = &(*state)->dst;
					dst = &(*state)->src;
				}
			} else {
				if (PF_REVERSED_KEY((*state)->key, pd->af)) {
					src = &(*state)->dst;
					dst = &(*state)->src;
				} else {
					src = &(*state)->src;
					dst = &(*state)->dst;
				}
			}

			if (src->wscale && dst->wscale)
				dws = dst->wscale & PF_WSCALE_MASK;
			else
				dws = 0;

			/* Demodulate sequence number */
			seq = ntohl(th.th_seq) - src->seqdiff;
			if (src->seqdiff) {
				pf_change_a(pd, &th.th_seq, htonl(seq));
				copyback = 1;
			}

			/* quoted seq must fall inside the tracked window */
			if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
			    (!SEQ_GEQ(src->seqhi, seq) || !SEQ_GEQ(seq,
			    src->seqlo - (dst->max_win << dws)))) {
				if (pf_status.debug >= LOG_NOTICE) {
					log(LOG_NOTICE,
					    "pf: BAD ICMP %d:%d ",
					    icmptype, pd->hdr.icmp->icmp_code);
					pf_print_host(pd->src, 0, pd->af);
					addlog(" -> ");
					pf_print_host(pd->dst, 0, pd->af);
					addlog(" state: ");
					pf_print_state(*state);
					addlog(" seq=%u\n", seq);
				}
				REASON_SET(reason, PFRES_BADSTATE);
				return (PF_DROP);
			} else {
				if (pf_status.debug >= LOG_DEBUG) {
					log(LOG_DEBUG,
					    "pf: OK ICMP %d:%d ",
					    icmptype, pd->hdr.icmp->icmp_code);
					pf_print_host(pd->src, 0, pd->af);
					addlog(" -> ");
					pf_print_host(pd->dst, 0, pd->af);
					addlog(" state: ");
					pf_print_state(*state);
					addlog(" seq=%u\n", seq);
				}
			}

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key	*nk;
				int			 afto, sidx, didx;

				if (PF_REVERSED_KEY((*state)->key, pd->af))
					nk = (*state)->key[pd->sidx];
				else
					nk = (*state)->key[pd->didx];

				afto = pd->af != nk->af;
				sidx = afto ? pd2.didx : pd2.sidx;
				didx = afto ? pd2.sidx : pd2.didx;

#ifdef INET6
				if (afto) {
					if (pf_translate_icmp_af(nk->af,
					    pd->hdr.icmp))
						return (PF_DROP);
					m_copyback(pd->m, pd->off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6, M_NOWAIT);
					if (pf_change_icmp_af(pd->m, ipoff2,
					    pd, &pd2, &nk->addr[sidx],
					    &nk->addr[didx], pd->af, nk->af))
						return (PF_DROP);
					if (nk->af == AF_INET)
						pd->proto = IPPROTO_ICMP;
					else
						pd->proto = IPPROTO_ICMPV6;
					pf_change_ap(pd, pd2.src, &th.th_sport,
					    &nk->addr[pd2.sidx], nk->port[sidx],
					    nk->af);
					pf_change_ap(pd, pd2.dst, &th.th_dport,
					    &nk->addr[pd2.didx], nk->port[didx],
					    nk->af);
					m_copyback(pd2.m, pd2.off, 8, &th,
					    M_NOWAIT);
					pd->m->m_pkthdr.ph_rtableid =
					    nk->rdomain;
					pd->destchg = 1;
					PF_ACPY(&pd->nsaddr,
					    &nk->addr[pd2.sidx], nk->af);
					PF_ACPY(&pd->ndaddr,
					    &nk->addr[pd2.didx], nk->af);
					pd->naf = nk->af;
					return (PF_AFRT);
				}
#endif /* INET6 */
				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    nk->port[pd2.sidx] != th.th_sport)
					pf_change_icmp(pd, pd2.src,
					    &th.th_sport, daddr,
					    &nk->addr[pd2.sidx],
					    nk->port[pd2.sidx], pd2.af);

				if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
				    pd2.af) || pd2.rdomain != nk->rdomain)
					pd->destchg = 1;
				pd->m->m_pkthdr.ph_rtableid = nk->rdomain;

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af) ||
				    nk->port[pd2.didx] != th.th_dport)
					pf_change_icmp(pd, pd2.dst,
					    &th.th_dport, saddr,
					    &nk->addr[pd2.didx],
					    nk->port[pd2.didx], pd2.af);
				copyback = 1;
			}

			if (copyback) {
				/* write back outer ICMP + quoted IP + TCP */
				switch (pd2.af) {
				case AF_INET:
					m_copyback(pd->m, pd->off, ICMP_MINLEN,
					    pd->hdr.icmp, M_NOWAIT);
					m_copyback(pd2.m, ipoff2, sizeof(h2),
					    &h2, M_NOWAIT);
					break;
#ifdef INET6
				case AF_INET6:
					m_copyback(pd->m, pd->off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6, M_NOWAIT);
					m_copyback(pd2.m, ipoff2, sizeof(h2_6),
					    &h2_6, M_NOWAIT);
					break;
#endif /* INET6 */
				}
				m_copyback(pd2.m, pd2.off, 8, &th, M_NOWAIT);
			}
			break;
		}
		case IPPROTO_UDP: {
			struct udphdr	 uh;

			if (!pf_pull_hdr(pd2.m, pd2.off, &uh, sizeof(uh),
			    NULL, reason, pd2.af)) {
				DPFPRINTF(LOG_NOTICE,
				    "ICMP error message too short (udp)");
				return (PF_DROP);
			}

			key.af = pd2.af;
			key.proto = IPPROTO_UDP;
			key.rdomain = pd2.rdomain;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[pd2.sidx] = uh.uh_sport;
			key.port[pd2.didx] = uh.uh_dport;

			STATE_LOOKUP(pd2.kif, &key, pd2.dir, *state, pd2.m);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key	*nk;
				int			 afto, sidx, didx;

				if (PF_REVERSED_KEY((*state)->key, pd->af))
					nk = (*state)->key[pd->sidx];
				else
					nk = (*state)->key[pd->didx];

				afto = pd->af != nk->af;
				sidx = afto ? pd2.didx : pd2.sidx;
				didx = afto ? pd2.sidx : pd2.didx;

#ifdef INET6
				if (afto) {
					if (pf_translate_icmp_af(nk->af,
					    pd->hdr.icmp))
						return (PF_DROP);
					m_copyback(pd->m, pd->off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6, M_NOWAIT);
					if (pf_change_icmp_af(pd->m, ipoff2,
					    pd, &pd2, &nk->addr[sidx],
					    &nk->addr[didx], pd->af, nk->af))
						return (PF_DROP);
					if (nk->af == AF_INET)
						pd->proto = IPPROTO_ICMP;
					else
						pd->proto = IPPROTO_ICMPV6;
					pf_change_ap(pd, pd2.src, &uh.uh_sport,
					    &nk->addr[pd2.sidx], nk->port[sidx],
					    nk->af);
					pf_change_ap(pd, pd2.dst, &uh.uh_dport,
					    &nk->addr[pd2.didx], nk->port[didx],
					    nk->af);
					m_copyback(pd2.m, pd2.off, sizeof(uh),
					    &uh, M_NOWAIT);
					pd->m->m_pkthdr.ph_rtableid =
					    nk->rdomain;
					pd->destchg = 1;
					PF_ACPY(&pd->nsaddr,
					    &nk->addr[pd2.sidx], nk->af);
					PF_ACPY(&pd->ndaddr,
					    &nk->addr[pd2.didx], nk->af);
					pd->naf = nk->af;
					return (PF_AFRT);
				}
#endif /* INET6 */

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    nk->port[pd2.sidx] != uh.uh_sport)
					pf_change_icmp(pd, pd2.src,
					    &uh.uh_sport, daddr,
					    &nk->addr[pd2.sidx],
					    nk->port[pd2.sidx], pd2.af);

				if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
				    pd2.af) || pd2.rdomain != nk->rdomain)
					pd->destchg = 1;
				pd->m->m_pkthdr.ph_rtableid = nk->rdomain;

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af) ||
				    nk->port[pd2.didx] != uh.uh_dport)
					pf_change_icmp(pd, pd2.dst,
					    &uh.uh_dport, saddr,
					    &nk->addr[pd2.didx],
					    nk->port[pd2.didx], pd2.af);

				switch (pd2.af) {
				case AF_INET:
					m_copyback(pd->m, pd->off, ICMP_MINLEN,
					    pd->hdr.icmp, M_NOWAIT);
					m_copyback(pd2.m, ipoff2, sizeof(h2),
					    &h2, M_NOWAIT);
					break;
#ifdef INET6
				case AF_INET6:
					m_copyback(pd->m, pd->off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6, M_NOWAIT);
					m_copyback(pd2.m, ipoff2, sizeof(h2_6),
					    &h2_6, M_NOWAIT);
					break;
#endif /* INET6 */
				}
				/* quoted UDP checksum is zeroed before copy */
				uh.uh_sum = 0;
				m_copyback(pd2.m, pd2.off, sizeof(uh), &uh,
				    M_NOWAIT);
				copyback = 1;
			}
			break;
		}
		case IPPROTO_ICMP: {
			struct icmp	 iih;

			if (pd2.af != AF_INET) {
				REASON_SET(reason, PFRES_NORM);
				return (PF_DROP);
			}

			if (!pf_pull_hdr(pd2.m, pd2.off, &iih, ICMP_MINLEN,
			    NULL, reason, pd2.af)) {
				DPFPRINTF(LOG_NOTICE,
				    "ICMP error message too short (icmp)");
				return (PF_DROP);
			}

			pd2.hdr.icmp = &iih;
			pf_icmp_mapping(&pd2, iih.icmp_type,
			    &icmp_dir, &virtual_id, &virtual_type);

			ret = pf_icmp_state_lookup(&pd2, &key, state,
			    virtual_id, virtual_type, icmp_dir, &iidx, 0, 1);
			if (ret >= 0)
				return (ret);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key	*nk;
				int			 afto, sidx, didx;

				if (PF_REVERSED_KEY((*state)->key, pd->af))
					nk = (*state)->key[pd->sidx];
				else
					nk = (*state)->key[pd->didx];

				afto = pd->af != nk->af;
				sidx = afto ? pd2.didx : pd2.sidx;
				didx = afto ? pd2.sidx : pd2.didx;
				iidx = afto ? !iidx : iidx;

#ifdef INET6
				if (afto) {
					if (nk->af != AF_INET6)
						return (PF_DROP);
					if (pf_translate_icmp_af(nk->af,
					    pd->hdr.icmp))
						return (PF_DROP);
					m_copyback(pd->m, pd->off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6, M_NOWAIT);
					if (pf_change_icmp_af(pd->m, ipoff2,
					    pd, &pd2, &nk->addr[sidx],
					    &nk->addr[didx], pd->af, nk->af))
						return (PF_DROP);
					pd->proto = IPPROTO_ICMPV6;
					if (pf_translate_icmp_af(nk->af, &iih))
						return (PF_DROP);
					if (virtual_type == htons(ICMP_ECHO) &&
					    nk->port[iidx] != iih.icmp_id)
						iih.icmp_id = nk->port[iidx];
					m_copyback(pd2.m, pd2.off, ICMP_MINLEN,
					    &iih, M_NOWAIT);
					pd->m->m_pkthdr.ph_rtableid =
					    nk->rdomain;
					pd->destchg = 1;
					PF_ACPY(&pd->nsaddr,
					    &nk->addr[pd2.sidx], nk->af);
					PF_ACPY(&pd->ndaddr,
					    &nk->addr[pd2.didx], nk->af);
					pd->naf = nk->af;
					return (PF_AFRT);
				}
#endif /* INET6 */

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    (virtual_type == htons(ICMP_ECHO) &&
				    nk->port[iidx] != iih.icmp_id))
					pf_change_icmp(pd, pd2.src,
					    (virtual_type == htons(ICMP_ECHO)) ?
					    &iih.icmp_id : NULL,
					    daddr, &nk->addr[pd2.sidx],
					    (virtual_type == htons(ICMP_ECHO)) ?
					    nk->port[iidx] : 0, AF_INET);

				if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
				    pd2.af) || pd2.rdomain != nk->rdomain)
					pd->destchg = 1;
				pd->m->m_pkthdr.ph_rtableid = nk->rdomain;

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af))
					pf_change_icmp(pd, pd2.dst, NULL,
					    saddr, &nk->addr[pd2.didx], 0,
					    AF_INET);

				m_copyback(pd->m, pd->off, ICMP_MINLEN,
				    pd->hdr.icmp, M_NOWAIT);
				m_copyback(pd2.m, ipoff2, sizeof(h2), &h2,
				    M_NOWAIT);
				m_copyback(pd2.m, pd2.off, ICMP_MINLEN, &iih,
				    M_NOWAIT);
				copyback = 1;
			}
			break;
		}
#ifdef INET6
		case IPPROTO_ICMPV6: {
			struct icmp6_hdr	 iih;

			if (pd2.af != AF_INET6) {
				REASON_SET(reason, PFRES_NORM);
				return (PF_DROP);
			}

			if (!pf_pull_hdr(pd2.m, pd2.off, &iih,
			    sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
				DPFPRINTF(LOG_NOTICE,
				    "ICMP error message too short (icmp6)");
				return (PF_DROP);
			}

			pd2.hdr.icmp6 = &iih;
			pf_icmp_mapping(&pd2, iih.icmp6_type,
			    &icmp_dir, &virtual_id, &virtual_type);
			ret = pf_icmp_state_lookup(&pd2, &key, state,
			    virtual_id, virtual_type, icmp_dir, &iidx, 0, 1);
			/* IPv6? try matching a multicast address */
			if (ret == PF_DROP && pd2.af == AF_INET6 &&
			    icmp_dir == PF_OUT)
				ret = pf_icmp_state_lookup(&pd2, &key, state,
				    virtual_id, virtual_type, icmp_dir, &iidx,
				    1, 1);
			if (ret >= 0)
				return (ret);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key	*nk;
				int			 afto, sidx, didx;

				if (PF_REVERSED_KEY((*state)->key, pd->af))
					nk = (*state)->key[pd->sidx];
				else
					nk = (*state)->key[pd->didx];

				afto = pd->af != nk->af;
				sidx = afto ? pd2.didx : pd2.sidx;
				didx = afto ? pd2.sidx : pd2.didx;
				iidx = afto ? !iidx : iidx;

				if (afto) {
					if (nk->af != AF_INET)
						return (PF_DROP);
					if (pf_translate_icmp_af(nk->af,
					    pd->hdr.icmp))
						return (PF_DROP);
					m_copyback(pd->m, pd->off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6, M_NOWAIT);
					if (pf_change_icmp_af(pd->m, ipoff2,
					    pd, &pd2, &nk->addr[sidx],
					    &nk->addr[didx], pd->af, nk->af))
						return (PF_DROP);
					pd->proto = IPPROTO_ICMP;
					if (pf_translate_icmp_af(nk->af, &iih))
						return (PF_DROP);
					if (virtual_type ==
					    htons(ICMP6_ECHO_REQUEST) &&
					    nk->port[iidx] != iih.icmp6_id)
						iih.icmp6_id = nk->port[iidx];
					m_copyback(pd2.m, pd2.off,
					    sizeof(struct icmp6_hdr), &iih,
					    M_NOWAIT);
					pd->m->m_pkthdr.ph_rtableid =
					    nk->rdomain;
					pd->destchg = 1;
					PF_ACPY(&pd->nsaddr,
					    &nk->addr[pd2.sidx], nk->af);
					PF_ACPY(&pd->ndaddr,
					    &nk->addr[pd2.didx], nk->af);
					pd->naf = nk->af;
					return (PF_AFRT);
				}

				/* NOTE(review): the check reads
				 * nk->port[pd2.sidx] but the rewrite uses
				 * nk->port[iidx] — confirm these agree. */
				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af) ||
				    ((virtual_type ==
				    htons(ICMP6_ECHO_REQUEST)) &&
				    nk->port[pd2.sidx] != iih.icmp6_id))
					pf_change_icmp(pd, pd2.src,
					    (virtual_type ==
					    htons(ICMP6_ECHO_REQUEST))
					    ? &iih.icmp6_id : NULL,
					    daddr, &nk->addr[pd2.sidx],
					    (virtual_type ==
					    htons(ICMP6_ECHO_REQUEST))
					    ? nk->port[iidx] : 0, AF_INET6);

				if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
				    pd2.af) || pd2.rdomain != nk->rdomain)
					pd->destchg = 1;
				pd->m->m_pkthdr.ph_rtableid = nk->rdomain;

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af))
					pf_change_icmp(pd, pd2.dst, NULL,
					    saddr, &nk->addr[pd2.didx], 0,
					    AF_INET6);

				m_copyback(pd->m, pd->off,
				    sizeof(struct icmp6_hdr), pd->hdr.icmp6,
				    M_NOWAIT);
				m_copyback(pd2.m, ipoff2, sizeof(h2_6), &h2_6,
				    M_NOWAIT);
				m_copyback(pd2.m, pd2.off,
				    sizeof(struct icmp6_hdr), &iih, M_NOWAIT);
				copyback = 1;
			}
			break;
		}
#endif /* INET6 */
		default: {
			/* other protocols: match on addresses only */
			key.af = pd2.af;
			key.proto = pd2.proto;
			key.rdomain = pd2.rdomain;
			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
			key.port[0] = key.port[1] = 0;

			STATE_LOOKUP(pd2.kif, &key, pd2.dir, *state, pd2.m);

			/* translate source/destination address, if necessary */
			if ((*state)->key[PF_SK_WIRE] !=
			    (*state)->key[PF_SK_STACK]) {
				struct pf_state_key *nk =
				    (*state)->key[pd->didx];

				if (PF_ANEQ(pd2.src,
				    &nk->addr[pd2.sidx], pd2.af))
					pf_change_icmp(pd, pd2.src, NULL,
					    daddr, &nk->addr[pd2.sidx], 0,
					    pd2.af);

				if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
				    pd2.af) || pd2.rdomain != nk->rdomain)
					pd->destchg = 1;
				pd->m->m_pkthdr.ph_rtableid = nk->rdomain;

				if (PF_ANEQ(pd2.dst,
				    &nk->addr[pd2.didx], pd2.af))
					pf_change_icmp(pd, pd2.dst, NULL,
					    saddr, &nk->addr[pd2.didx], 0,
					    pd2.af);

				switch (pd2.af) {
				case AF_INET:
					m_copyback(pd->m, pd->off, ICMP_MINLEN,
					    pd->hdr.icmp, M_NOWAIT);
					m_copyback(pd2.m, ipoff2, sizeof(h2),
					    &h2, M_NOWAIT);
					break;
#ifdef INET6
				case AF_INET6:
					m_copyback(pd->m, pd->off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6, M_NOWAIT);
					m_copyback(pd2.m, ipoff2, sizeof(h2_6),
					    &h2_6, M_NOWAIT);
					break;
#endif /* INET6 */
				}
				copyback = 1;
			}
			break;
		}
		}
	}
	/* copy back the (possibly rewritten) outer header */
	if (copyback) {
		pf_cksum(pd, pd->m);
		m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any, M_NOWAIT);
	}

	return (PF_PASS);
}
  4809. /*
  4810. * ipoff and off are measured from the start of the mbuf chain.
  4811. * h must be at "ipoff" on the mbuf chain.
  4812. */
  4813. void *
  4814. pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
  4815. u_short *actionp, u_short *reasonp, sa_family_t af)
  4816. {
  4817. switch (af) {
  4818. case AF_INET: {
  4819. struct ip *h = mtod(m, struct ip *);
  4820. u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
  4821. if (fragoff) {
  4822. if (fragoff >= len)
  4823. ACTION_SET(actionp, PF_PASS);
  4824. else {
  4825. ACTION_SET(actionp, PF_DROP);
  4826. REASON_SET(reasonp, PFRES_FRAG);
  4827. }
  4828. return (NULL);
  4829. }
  4830. if (m->m_pkthdr.len < off + len ||
  4831. ntohs(h->ip_len) < off + len) {
  4832. ACTION_SET(actionp, PF_DROP);
  4833. REASON_SET(reasonp, PFRES_SHORT);
  4834. return (NULL);
  4835. }
  4836. break;
  4837. }
  4838. #ifdef INET6
  4839. case AF_INET6: {
  4840. struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
  4841. if (m->m_pkthdr.len < off + len ||
  4842. (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
  4843. (unsigned)(off + len)) {
  4844. ACTION_SET(actionp, PF_DROP);
  4845. REASON_SET(reasonp, PFRES_SHORT);
  4846. return (NULL);
  4847. }
  4848. break;
  4849. }
  4850. #endif /* INET6 */
  4851. }
  4852. m_copydata(m, off, len, p);
  4853. return (p);
  4854. }
/*
 * pf_routable: check whether "addr" is reachable in routing table
 * "rtableid".  With a NULL kif this is a plain no-route check; with a
 * kif it performs a uRPF-style check, walking multipath routes (when
 * multipath is enabled for the family) until one points back at the
 * given interface.  Returns 1 if routable/verified, 0 otherwise.
 */
int
pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
    int rtableid)
{
	struct sockaddr_storage	 ss;
	struct sockaddr_in	*dst;
	int			 ret = 1;
	int			 check_mpath;
#ifdef INET6
	struct sockaddr_in6	*dst6;
#endif /* INET6 */
	struct rtentry		*rt, *rt0 = NULL;
	struct ifnet		*ifp;

	check_mpath = 0;
	memset(&ss, 0, sizeof(ss));
	switch (af) {
	case AF_INET:
		dst = (struct sockaddr_in *)&ss;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4;
		if (ipmultipath)
			check_mpath = 1;
		break;
#ifdef INET6
	case AF_INET6:
		/*
		 * Skip check for addresses with embedded interface scope,
		 * as they would always match anyway.
		 */
		if (IN6_IS_SCOPE_EMBED(&addr->v6))
			goto out;
		dst6 = (struct sockaddr_in6 *)&ss;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6;
		if (ip6_multipath)
			check_mpath = 1;
		break;
#endif /* INET6 */
	}

	/* Skip checks for ipsec interfaces */
	/* NOTE(review): kif->pfik_ifp is dereferenced here but only
	 * NULL-checked further below — confirm callers guarantee a
	 * non-NULL pfik_ifp whenever kif != NULL at this point. */
	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
		goto out;

	rt0 = rtalloc((struct sockaddr *)&ss, RT_REPORT, rtableid);
	if (rt0 != NULL) {
		/* No interface given, this is a no-route check */
		if (kif == NULL)
			goto out;

		if (kif->pfik_ifp == NULL) {
			ret = 0;
			goto out;
		}

		/* Perform uRPF check if passed input interface */
		ret = 0;
		rt = rt0;
		do {
			/* CARP interfaces resolve via their carp device */
			if (rt->rt_ifp->if_type == IFT_CARP)
				ifp = rt->rt_ifp->if_carpdev;
			else
				ifp = rt->rt_ifp;

			if (kif->pfik_ifp == ifp)
				ret = 1;
#ifndef SMALL_KERNEL
			rt = rt_mpath_next(rt);
#else
			rt = NULL;
#endif
		} while (check_mpath == 1 && rt != NULL && ret == 0);
	} else
		ret = 0;
out:
	if (rt0 != NULL)
		rtfree(rt0);

	return (ret);
}
  4931. int
  4932. pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw,
  4933. int rtableid)
  4934. {
  4935. struct sockaddr_storage ss;
  4936. struct sockaddr_in *dst;
  4937. #ifdef INET6
  4938. struct sockaddr_in6 *dst6;
  4939. #endif /* INET6 */
  4940. struct rtentry *rt;
  4941. int ret = 0;
  4942. memset(&ss, 0, sizeof(ss));
  4943. switch (af) {
  4944. case AF_INET:
  4945. dst = (struct sockaddr_in *)&ss;
  4946. dst->sin_family = AF_INET;
  4947. dst->sin_len = sizeof(*dst);
  4948. dst->sin_addr = addr->v4;
  4949. break;
  4950. #ifdef INET6
  4951. case AF_INET6:
  4952. dst6 = (struct sockaddr_in6 *)&ss;
  4953. dst6->sin6_family = AF_INET6;
  4954. dst6->sin6_len = sizeof(*dst6);
  4955. dst6->sin6_addr = addr->v6;
  4956. break;
  4957. #endif /* INET6 */
  4958. }
  4959. rt = rtalloc((struct sockaddr *)&ss, RT_REPORT|RT_RESOLVE, rtableid);
  4960. if (rt != NULL) {
  4961. if (rt->rt_labelid == aw->v.rtlabel)
  4962. ret = 1;
  4963. rtfree(rt);
  4964. }
  4965. return (ret);
  4966. }
/*
 * Send an IPv4 packet along a pf route-to/reply-to/dup-to rule 'r'.
 *
 * Ownership: for PF_DUPTO a copy of *m is sent and *m is left untouched;
 * otherwise the mbuf is consumed and *m is set to NULL on return (the
 * "done" path).  A routing-loop guard drops packets routed more than
 * 3 times.  Packets larger than the interface MTU are fragmented unless
 * IP_DF is set, in which case an ICMP needfrag error is generated
 * (except for dup-to copies, which are simply dropped).
 */
void
pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s)
{
        struct mbuf             *m0, *m1;
        struct sockaddr_in      *dst, sin;
        struct rtentry          *rt = NULL;
        struct ip               *ip;
        struct ifnet            *ifp = NULL;
        struct pf_addr           naddr;
        struct pf_src_node      *sns[PF_SN_MAX];
        int                      error = 0;
        unsigned int             rtableid;

        if (m == NULL || *m == NULL || r == NULL ||
            (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
                panic("pf_route: invalid parameters");

        /* Routing-loop protection: drop after too many pf_route passes. */
        if ((*m)->m_pkthdr.pf.routed++ > 3) {
                m0 = *m;
                *m = NULL;
                goto bad;
        }

        if (r->rt == PF_DUPTO) {
                /* dup-to operates on a copy; original continues unchanged. */
                if ((m0 = m_copym2(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
                        return;
        } else {
                /* reply-to applies only against the rule direction. */
                if ((r->rt == PF_REPLYTO) == (r->direction == dir))
                        return;
                m0 = *m;
        }

        if (m0->m_len < sizeof(struct ip)) {
                DPFPRINTF(LOG_ERR,
                    "pf_route: m0->m_len < sizeof(struct ip)");
                goto bad;
        }

        ip = mtod(m0, struct ip *);

        memset(&sin, 0, sizeof(sin));
        dst = &sin;
        dst->sin_family = AF_INET;
        dst->sin_len = sizeof(*dst);
        dst->sin_addr = ip->ip_dst;
        rtableid = m0->m_pkthdr.ph_rtableid;

        if (!r->rt) {
                /* No explicit route target: normal routing table lookup. */
                rt = rtalloc(sintosa(dst), RT_REPORT|RT_RESOLVE, rtableid);
                if (rt == NULL) {
                        ipstat.ips_noroute++;
                        goto bad;
                }

                ifp = rt->rt_ifp;
                rt->rt_use++;

                if (rt->rt_flags & RTF_GATEWAY)
                        dst = satosin(rt->rt_gateway);

                /* Mark as generated so pf does not process it again. */
                m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
        } else {
                if (s == NULL) {
                        /* Stateless: pick next hop from the rule's pool. */
                        bzero(sns, sizeof(sns));
                        if (pf_map_addr(AF_INET, r,
                            (struct pf_addr *)&ip->ip_src,
                            &naddr, NULL, sns, &r->route, PF_SN_ROUTE)) {
                                DPFPRINTF(LOG_ERR,
                                    "pf_route: pf_map_addr() failed.");
                                goto bad;
                        }

                        if (!PF_AZERO(&naddr, AF_INET))
                                dst->sin_addr.s_addr = naddr.v4.s_addr;
                        ifp = r->route.kif ?
                            r->route.kif->pfik_ifp : NULL;
                } else {
                        /* Stateful: reuse the next hop cached in the state. */
                        if (!PF_AZERO(&s->rt_addr, AF_INET))
                                dst->sin_addr.s_addr =
                                    s->rt_addr.v4.s_addr;
                        ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
                }
        }
        if (ifp == NULL)
                goto bad;

        if (oifp != ifp) {
                /* Changed interface: re-run pf for the outbound direction. */
                if (pf_test(AF_INET, PF_OUT, ifp, &m0) != PF_PASS)
                        goto bad;
                else if (m0 == NULL)
                        goto done;
                if (m0->m_len < sizeof(struct ip)) {
                        DPFPRINTF(LOG_ERR,
                            "pf_route: m0->m_len < sizeof(struct ip)");
                        goto bad;
                }
                ip = mtod(m0, struct ip *);
        }

        in_proto_cksum_out(m0, ifp);

        if (ntohs(ip->ip_len) <= ifp->if_mtu) {
                /* Fits in one frame: finalize IP checksum and transmit. */
                ip->ip_sum = 0;
                if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
                        m0->m_pkthdr.csum_flags |= M_IPV4_CSUM_OUT;
                else {
                        ipstat.ips_outswcsum++;
                        ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
                }
                error = (*ifp->if_output)(ifp, m0, sintosa(dst), NULL);
                goto done;
        }

        /*
         * Too large for interface; fragment if possible.
         * Must be able to put at least 8 bytes per fragment.
         */
        if (ip->ip_off & htons(IP_DF)) {
                ipstat.ips_cantfrag++;
                if (r->rt != PF_DUPTO) {
                        icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
                            ifp->if_mtu);
                        goto done;
                } else
                        goto bad;
        }

        m1 = m0;
        error = ip_fragment(m0, ifp, ifp->if_mtu);
        if (error) {
                /* ip_fragment freed the chain on error. */
                m0 = NULL;
                goto bad;
        }

        /* Send each fragment; on first error free the remainder. */
        for (m0 = m1; m0; m0 = m1) {
                m1 = m0->m_nextpkt;
                m0->m_nextpkt = 0;
                if (error == 0)
                        error = (*ifp->if_output)(ifp, m0, sintosa(dst),
                            NULL);
                else
                        m_freem(m0);
        }

        if (error == 0)
                ipstat.ips_fragmented++;

done:
        if (r->rt != PF_DUPTO)
                *m = NULL;
        if (rt != NULL)
                rtfree(rt);
        return;

bad:
        m_freem(m0);
        goto done;
}
#ifdef INET6
/*
 * IPv6 counterpart of pf_route(): send a packet along a pf
 * route-to/reply-to/dup-to rule 'r'.
 *
 * Ownership mirrors pf_route(): PF_DUPTO sends a copy, otherwise the
 * mbuf is consumed and *m is set to NULL.  Oversized packets are not
 * fragmented; an ICMP6 packet-too-big error is returned instead
 * (dup-to copies are dropped silently).
 */
void
pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s)
{
        struct mbuf             *m0;
        struct sockaddr_in6     *dst, sin6;
        struct ip6_hdr          *ip6;
        struct ifnet            *ifp = NULL;
        struct pf_addr           naddr;
        struct pf_src_node      *sns[PF_SN_MAX];

        if (m == NULL || *m == NULL || r == NULL ||
            (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
                panic("pf_route6: invalid parameters");

        /* Routing-loop protection: drop after too many pf_route passes. */
        if ((*m)->m_pkthdr.pf.routed++ > 3) {
                m0 = *m;
                *m = NULL;
                goto bad;
        }

        if (r->rt == PF_DUPTO) {
                /* dup-to operates on a copy; original continues unchanged. */
                if ((m0 = m_copym2(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
                        return;
        } else {
                /* reply-to applies only against the rule direction. */
                if ((r->rt == PF_REPLYTO) == (r->direction == dir))
                        return;
                m0 = *m;
        }

        if (m0->m_len < sizeof(struct ip6_hdr)) {
                DPFPRINTF(LOG_ERR,
                    "pf_route6: m0->m_len < sizeof(struct ip6_hdr)");
                goto bad;
        }
        ip6 = mtod(m0, struct ip6_hdr *);

        memset(&sin6, 0, sizeof(sin6));
        dst = &sin6;
        dst->sin6_family = AF_INET6;
        dst->sin6_len = sizeof(*dst);
        dst->sin6_addr = ip6->ip6_dst;

        if (!r->rt) {
                /* No explicit route target: hand off to the IPv6 stack. */
                m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
                ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
                return;
        }

        if (s == NULL) {
                /* Stateless: pick next hop from the rule's pool. */
                bzero(sns, sizeof(sns));
                if (pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
                    &naddr, NULL, sns, &r->route, PF_SN_ROUTE)) {
                        DPFPRINTF(LOG_ERR,
                            "pf_route6: pf_map_addr() failed.");
                        goto bad;
                }
                if (!PF_AZERO(&naddr, AF_INET6))
                        PF_ACPY((struct pf_addr *)&dst->sin6_addr,
                            &naddr, AF_INET6);
                ifp = r->route.kif ? r->route.kif->pfik_ifp : NULL;
        } else {
                /* Stateful: reuse the next hop cached in the state. */
                if (!PF_AZERO(&s->rt_addr, AF_INET6))
                        PF_ACPY((struct pf_addr *)&dst->sin6_addr,
                            &s->rt_addr, AF_INET6);
                ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
        }
        if (ifp == NULL)
                goto bad;

        if (oifp != ifp) {
                /* Changed interface: re-run pf for the outbound direction. */
                if (pf_test(AF_INET6, PF_OUT, ifp, &m0) != PF_PASS)
                        goto bad;
                else if (m0 == NULL)
                        goto done;
                if (m0->m_len < sizeof(struct ip6_hdr)) {
                        DPFPRINTF(LOG_ERR,
                            "pf_route6: m0->m_len < sizeof(struct ip6_hdr)");
                        goto bad;
                }
        }

        in6_proto_cksum_out(m0, ifp);

        /*
         * If the packet is too large for the outgoing interface,
         * send back an icmp6 error.
         */
        if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
                dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
        if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
                nd6_output(ifp, m0, dst, NULL);
        } else {
                in6_ifstat_inc(ifp, ifs6_in_toobig);
                if (r->rt != PF_DUPTO)
                        icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
                else
                        goto bad;
        }

done:
        if (r->rt != PF_DUPTO)
                *m = NULL;
        return;

bad:
        m_freem(m0);
        goto done;
}
#endif /* INET6 */
/*
 * Check protocol (tcp/udp/icmp/icmp6) checksum and set the mbuf flag.
 * 'off' is the offset where the protocol header starts and 'len' is the
 * total length of protocol header plus payload.
 * Returns 0 when the checksum is valid, otherwise returns 1.
 * If the _OUT flag is set the checksum hasn't been computed yet;
 * consider these packets ok.
 * The verdict is cached in pd->csum_status so repeated calls on the
 * same packet descriptor are cheap.
 */
int
pf_check_proto_cksum(struct pf_pdesc *pd, int off, int len, u_int8_t p,
    sa_family_t af)
{
        u_int16_t flag_ok, flag_bad, flag_out;
        u_int16_t sum;

        /* Cached result from a previous call on this descriptor. */
        if (pd->csum_status == PF_CSUM_OK)
                return (0);
        if (pd->csum_status == PF_CSUM_BAD)
                return (1);

        /* Select the per-protocol mbuf checksum flags. */
        switch (p) {
        case IPPROTO_TCP:
                flag_ok = M_TCP_CSUM_IN_OK;
                flag_out = M_TCP_CSUM_OUT;
                flag_bad = M_TCP_CSUM_IN_BAD;
                break;
        case IPPROTO_UDP:
                flag_ok = M_UDP_CSUM_IN_OK;
                flag_out = M_UDP_CSUM_OUT;
                flag_bad = M_UDP_CSUM_IN_BAD;
                break;
        case IPPROTO_ICMP:
#ifdef INET6
        case IPPROTO_ICMPV6:
#endif /* INET6 */
                flag_ok = M_ICMP_CSUM_IN_OK;
                flag_out = M_ICMP_CSUM_OUT;
                flag_bad = M_ICMP_CSUM_IN_BAD;
                break;
        default:
                /* Unknown protocol: treat as bad. */
                return (1);
        }

        /* Hardware verified it, or checksum not yet computed on output. */
        if (pd->m->m_pkthdr.csum_flags & (flag_ok | flag_out)) {
                pd->csum_status = PF_CSUM_OK;
                return (0);
        }
        /* Hardware flagged it bad, or the claimed span exceeds the packet. */
        if (pd->m->m_pkthdr.csum_flags & flag_bad || off < sizeof(struct ip) ||
            pd->m->m_pkthdr.len < off + len) {
                pd->csum_status = PF_CSUM_BAD;
                return (1);
        }

        /* need to do it in software */
        if (p == IPPROTO_TCP)
                tcpstat.tcps_inswcsum++;
        else if (p == IPPROTO_UDP)
                udpstat.udps_inswcsum++;

        switch (af) {
        case AF_INET:
                if (pd->m->m_len < sizeof(struct ip)) {
                        pd->csum_status = PF_CSUM_BAD;
                        return (1);
                }
                /* ICMPv4 checksums exclude the pseudo-header (proto 0). */
                sum = in4_cksum(pd->m, (p == IPPROTO_ICMP ? 0 : p), off, len);
                break;
#ifdef INET6
        case AF_INET6:
                if (pd->m->m_len < sizeof(struct ip6_hdr)) {
                        pd->csum_status = PF_CSUM_BAD;
                        return (1);
                }
                sum = in6_cksum(pd->m, p, off, len);
                break;
#endif /* INET6 */
        default:
                unhandled_af(af);
        }
        if (sum) {
                /* Bad checksum: bump the per-protocol error counter. */
                switch (p) {
                case IPPROTO_TCP:
                        tcpstat.tcps_rcvbadsum++;
                        break;
                case IPPROTO_UDP:
                        udpstat.udps_badsum++;
                        break;
                case IPPROTO_ICMP:
                        icmpstat.icps_checksum++;
                        break;
#ifdef INET6
                case IPPROTO_ICMPV6:
                        icmp6stat.icp6s_checksum++;
                        break;
#endif /* INET6 */
                }
                pd->m->m_pkthdr.csum_flags |= flag_bad;
                pd->csum_status = PF_CSUM_BAD;
                return (1);
        }
        /* Cache the good verdict on both mbuf and descriptor. */
        pd->m->m_pkthdr.csum_flags |= flag_ok;
        pd->csum_status = PF_CSUM_OK;
        return (0);
}
  5303. struct pf_divert *
  5304. pf_find_divert(struct mbuf *m)
  5305. {
  5306. struct m_tag *mtag;
  5307. if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL)
  5308. return (NULL);
  5309. return ((struct pf_divert *)(mtag + 1));
  5310. }
  5311. struct pf_divert *
  5312. pf_get_divert(struct mbuf *m)
  5313. {
  5314. struct m_tag *mtag;
  5315. if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) {
  5316. mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert),
  5317. M_NOWAIT);
  5318. if (mtag == NULL)
  5319. return (NULL);
  5320. bzero(mtag + 1, sizeof(struct pf_divert));
  5321. m_tag_prepend(m, mtag);
  5322. }
  5323. return ((struct pf_divert *)(mtag + 1));
  5324. }
#ifdef INET6
/*
 * Walk the options inside an IPv6 hop-by-hop options header occupying
 * mbuf offsets [off, end).  Validates option lengths and, for a jumbo
 * payload option, records the jumbogram length in pd->jumbolen.
 * Returns PF_PASS when the options parse cleanly, PF_DROP (with
 * *reason set) on malformed, truncated, or duplicate-jumbo options.
 */
int
pf_walk_option6(struct pf_pdesc *pd, struct ip6_hdr *h, int off, int end,
    u_short *reason)
{
        struct ip6_opt           opt;
        struct ip6_opt_jumbo     jumbo;

        while (off < end) {
                /* Peek at the option type first; PAD1 has no length byte. */
                if (!pf_pull_hdr(pd->m, off, &opt.ip6o_type,
                    sizeof(opt.ip6o_type), NULL, reason, AF_INET6)) {
                        DPFPRINTF(LOG_NOTICE, "IPv6 short opt type");
                        return (PF_DROP);
                }
                if (opt.ip6o_type == IP6OPT_PAD1) {
                        off++;
                        continue;
                }
                /* All other options carry a type/length pair. */
                if (!pf_pull_hdr(pd->m, off, &opt, sizeof(opt),
                    NULL, reason, AF_INET6)) {
                        DPFPRINTF(LOG_NOTICE, "IPv6 short opt");
                        return (PF_DROP);
                }
                /* Option must not extend past the options header. */
                if (off + sizeof(opt) + opt.ip6o_len > end) {
                        DPFPRINTF(LOG_NOTICE, "IPv6 long opt");
                        REASON_SET(reason, PFRES_IPOPTIONS);
                        return (PF_DROP);
                }
                switch (opt.ip6o_type) {
                case IP6OPT_JUMBO:
                        /* At most one jumbo payload option is allowed. */
                        if (pd->jumbolen != 0) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 multiple jumbo");
                                REASON_SET(reason, PFRES_IPOPTIONS);
                                return (PF_DROP);
                        }
                        /* Jumbograms require a zero ip6_plen (RFC 2675). */
                        if (ntohs(h->ip6_plen) != 0) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 bad jumbo plen");
                                REASON_SET(reason, PFRES_IPOPTIONS);
                                return (PF_DROP);
                        }
                        if (!pf_pull_hdr(pd->m, off, &jumbo, sizeof(jumbo),
                            NULL, reason, AF_INET6)) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 short jumbo");
                                return (PF_DROP);
                        }
                        /* Unaligned field: copy, then convert byte order. */
                        memcpy(&pd->jumbolen, jumbo.ip6oj_jumbo_len,
                            sizeof(pd->jumbolen));
                        pd->jumbolen = ntohl(pd->jumbolen);
                        /* Jumbo lengths below 64k must use plain ip6_plen. */
                        if (pd->jumbolen < IPV6_MAXPACKET) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 short jumbolen");
                                REASON_SET(reason, PFRES_IPOPTIONS);
                                return (PF_DROP);
                        }
                        break;
                default:
                        break;
                }
                off += sizeof(opt) + opt.ip6o_len;
        }

        return (PF_PASS);
}
/*
 * Walk the IPv6 extension header chain of the packet in 'pd', starting
 * right after the fixed IPv6 header.  On success (PF_PASS) leaves
 * pd->off at the upper-layer header and pd->proto set to its protocol;
 * also records pd->fragoff (offset of a fragment header, if any),
 * pd->extoff (last extension header before a fragment) and
 * pd->jumbolen (via pf_walk_option6).  Returns PF_DROP with *reason
 * set for malformed chains (duplicate fragment/routing headers,
 * type-0 routing header, truncated headers, fragmented jumbograms).
 */
int
pf_walk_header6(struct pf_pdesc *pd, struct ip6_hdr *h, u_short *reason)
{
        struct ip6_frag          frag;
        struct ip6_ext           ext;
        struct ip6_rthdr         rthdr;
        u_int32_t                end;
        int                      fraghdr_cnt = 0, rthdr_cnt = 0;

        pd->off += sizeof(struct ip6_hdr);
        /* 'end' is the offset one past the payload claimed by ip6_plen. */
        end = pd->off + ntohs(h->ip6_plen);
        pd->fragoff = pd->extoff = pd->jumbolen = 0;
        pd->proto = h->ip6_nxt;
        for (;;) {
                switch (pd->proto) {
                case IPPROTO_FRAGMENT:
                        if (fraghdr_cnt++) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 multiple fragment");
                                REASON_SET(reason, PFRES_FRAG);
                                return (PF_DROP);
                        }
                        /* jumbo payload packets cannot be fragmented */
                        if (pd->jumbolen != 0) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 fragmented jumbo");
                                REASON_SET(reason, PFRES_FRAG);
                                return (PF_DROP);
                        }
                        if (!pf_pull_hdr(pd->m, pd->off, &frag, sizeof(frag),
                            NULL, reason, AF_INET6)) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 short fragment");
                                return (PF_DROP);
                        }
                        /* stop walking over non initial fragments */
                        if (ntohs((frag.ip6f_offlg & IP6F_OFF_MASK)) != 0) {
                                pd->fragoff = pd->off;
                                return (PF_PASS);
                        }
                        /* RFC6946:  reassemble only non atomic fragments */
                        if (frag.ip6f_offlg & IP6F_MORE_FRAG)
                                pd->fragoff = pd->off;
                        pd->off += sizeof(frag);
                        pd->proto = frag.ip6f_nxt;
                        break;
                case IPPROTO_ROUTING:
                        if (rthdr_cnt++) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 multiple rthdr");
                                REASON_SET(reason, PFRES_IPOPTIONS);
                                return (PF_DROP);
                        }
                        /* fragments may be short */
                        if (pd->fragoff != 0 && end < pd->off + sizeof(rthdr)) {
                                /* Rewind to the fragment header and stop. */
                                pd->off = pd->fragoff;
                                pd->proto = IPPROTO_FRAGMENT;
                                return (PF_PASS);
                        }
                        if (!pf_pull_hdr(pd->m, pd->off, &rthdr, sizeof(rthdr),
                            NULL, reason, AF_INET6)) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 short rthdr");
                                return (PF_DROP);
                        }
                        /* Type-0 routing headers are deprecated; drop them. */
                        if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 rthdr0");
                                REASON_SET(reason, PFRES_IPOPTIONS);
                                return (PF_DROP);
                        }
                        /* FALLTHROUGH */
                case IPPROTO_AH:
                case IPPROTO_HOPOPTS:
                case IPPROTO_DSTOPTS:
                        /* fragments may be short */
                        if (pd->fragoff != 0 && end < pd->off + sizeof(ext)) {
                                pd->off = pd->fragoff;
                                pd->proto = IPPROTO_FRAGMENT;
                                return (PF_PASS);
                        }
                        if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext),
                            NULL, reason, AF_INET6)) {
                                DPFPRINTF(LOG_NOTICE, "IPv6 short exthdr");
                                return (PF_DROP);
                        }
                        /* reassembly needs the ext header before the frag */
                        if (pd->fragoff == 0)
                                pd->extoff = pd->off;
                        if (pd->proto == IPPROTO_HOPOPTS && pd->fragoff == 0) {
                                /* Inspect hop-by-hop options (jumbo etc.). */
                                if (pf_walk_option6(pd, h,
                                    pd->off + sizeof(ext),
                                    pd->off + (ext.ip6e_len + 1) * 8, reason)
                                    != PF_PASS)
                                        return (PF_DROP);
                                /* Zero plen requires a jumbo option. */
                                if (ntohs(h->ip6_plen) == 0 &&
                                    pd->jumbolen != 0) {
                                        DPFPRINTF(LOG_NOTICE,
                                            "IPv6 missing jumbo");
                                        REASON_SET(reason, PFRES_IPOPTIONS);
                                        return (PF_DROP);
                                }
                        }
                        /* AH lengths are in 4-byte units, others 8-byte. */
                        if (pd->proto == IPPROTO_AH)
                                pd->off += (ext.ip6e_len + 2) * 4;
                        else
                                pd->off += (ext.ip6e_len + 1) * 8;
                        pd->proto = ext.ip6e_nxt;
                        break;
                case IPPROTO_TCP:
                case IPPROTO_UDP:
                case IPPROTO_ICMPV6:
                        /* fragments may be short, ignore inner header then */
                        if (pd->fragoff != 0 && end < pd->off +
                            (pd->proto == IPPROTO_TCP ? sizeof(struct tcphdr) :
                            pd->proto == IPPROTO_UDP ? sizeof(struct udphdr) :
                            sizeof(struct icmp6_hdr))) {
                                pd->off = pd->fragoff;
                                pd->proto = IPPROTO_FRAGMENT;
                        }
                        /* FALLTHROUGH */
                default:
                        /* Upper-layer or unknown header: walk is done. */
                        return (PF_PASS);
                }
        }
}
#endif /* INET6 */
/*
 * Initialize the packet descriptor 'pd' for mbuf 'm': sanity-check the
 * IP/IPv6 header, locate the transport header, and fill in the fields
 * (addresses, ports, protocol, lengths, tos/ttl, checksum pointer) that
 * the rest of pf relies on.  'pdhdrs' provides the caller's storage for
 * the pulled transport header.  Returns PF_PASS on success or PF_DROP
 * with *reason set for short/illegal packets.
 * kif may be NULL when called by pflog.
 */
int
pf_setup_pdesc(struct pf_pdesc *pd, void *pdhdrs, sa_family_t af, int dir,
    struct pfi_kif *kif, struct mbuf *m, u_short *reason)
{
        bzero(pd, sizeof(*pd));
        pd->hdr.any = pdhdrs;
        pd->dir = dir;
        pd->kif = kif;          /* kif is NULL when called by pflog */
        pd->m = m;
        /* State key index 0 holds the source on inbound packets. */
        pd->sidx = (dir == PF_IN) ? 0 : 1;
        pd->didx = (dir == PF_IN) ? 1 : 0;
        pd->af = pd->naf = af;
        pd->rdomain = rtable_l2(pd->m->m_pkthdr.ph_rtableid);

        switch (pd->af) {
        case AF_INET: {
                struct ip       *h;

                /* Check for illegal packets */
                if (pd->m->m_pkthdr.len < (int)sizeof(struct ip)) {
                        REASON_SET(reason, PFRES_SHORT);
                        return (PF_DROP);
                }

                h = mtod(pd->m, struct ip *);
                pd->off = h->ip_hl << 2;

                /* Header length must be sane and fit inside the packet. */
                if (pd->off < sizeof(struct ip) ||
                    pd->off > ntohs(h->ip_len) ||
                    pd->m->m_pkthdr.len < ntohs(h->ip_len)) {
                        REASON_SET(reason, PFRES_SHORT);
                        return (PF_DROP);
                }

                pd->src = (struct pf_addr *)&h->ip_src;
                pd->dst = (struct pf_addr *)&h->ip_dst;
                pd->virtual_proto = pd->proto = h->ip_p;
                pd->tot_len = ntohs(h->ip_len);
                /* ECN bits are not part of the tos pf matches on. */
                pd->tos = h->ip_tos & ~IPTOS_ECN_MASK;
                pd->ttl = h->ip_ttl;
                if (h->ip_hl > 5)       /* has options */
                        pd->badopts++;

                /* Fragments get a virtual protocol of their own. */
                if (h->ip_off & htons(IP_MF | IP_OFFMASK))
                        pd->virtual_proto = PF_VPROTO_FRAGMENT;

                break;
        }
#ifdef INET6
        case AF_INET6: {
                struct ip6_hdr  *h;

                /* Check for illegal packets */
                if (pd->m->m_pkthdr.len < (int)sizeof(struct ip6_hdr)) {
                        REASON_SET(reason, PFRES_SHORT);
                        return (PF_DROP);
                }

                h = mtod(pd->m, struct ip6_hdr *);
                pd->off = 0;

                if (pd->m->m_pkthdr.len <
                    sizeof(struct ip6_hdr) + ntohs(h->ip6_plen)) {
                        REASON_SET(reason, PFRES_SHORT);
                        return (PF_DROP);
                }

                /* Walk extension headers; sets off/proto/fragoff/jumbolen. */
                if (pf_walk_header6(pd, h, reason) != PF_PASS)
                        return (PF_DROP);

#if 1
                /*
                 * we do not support jumbogram yet.  if we keep going, zero
                 * ip6_plen will do something bad, so drop the packet for now.
                 */
                if (pd->jumbolen != 0) {
                        REASON_SET(reason, PFRES_NORM);
                        return (PF_DROP);
                }
#endif /* 1 */

                pd->src = (struct pf_addr *)&h->ip6_src;
                pd->dst = (struct pf_addr *)&h->ip6_dst;
                pd->virtual_proto = pd->proto;
                pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
                /* Traffic class, sans ECN bits, from the flow word. */
                pd->tos = (ntohl(h->ip6_flow) & 0x0fc00000) >> 20;
                pd->ttl = h->ip6_hlim;

                /* Fragments get a virtual protocol of their own. */
                if (pd->fragoff != 0)
                        pd->virtual_proto = PF_VPROTO_FRAGMENT;

                break;
        }
#endif /* INET6 */
        default:
                panic("pf_setup_pdesc called with illegal af %u", pd->af);

        }

        PF_ACPY(&pd->nsaddr, pd->src, pd->af);
        PF_ACPY(&pd->ndaddr, pd->dst, pd->af);

        switch (pd->virtual_proto) {
        case IPPROTO_TCP: {
                struct tcphdr   *th = pd->hdr.tcp;

                if (!pf_pull_hdr(pd->m, pd->off, th, sizeof(*th),
                    NULL, reason, pd->af))
                        return (PF_DROP);
                pd->hdrlen = sizeof(*th);
                /* th_off is in 32-bit words; validate against tot_len. */
                if (pd->off + (th->th_off << 2) > pd->tot_len ||
                    (th->th_off << 2) < sizeof(struct tcphdr)) {
                        REASON_SET(reason, PFRES_SHORT);
                        return (PF_DROP);
                }
                pd->p_len = pd->tot_len - pd->off - (th->th_off << 2);
                pd->sport = &th->th_sport;
                pd->dport = &th->th_dport;
                pd->pcksum = &th->th_sum;
                break;
        }
        case IPPROTO_UDP: {
                struct udphdr   *uh = pd->hdr.udp;

                if (!pf_pull_hdr(pd->m, pd->off, uh, sizeof(*uh),
                    NULL, reason, pd->af))
                        return (PF_DROP);
                pd->hdrlen = sizeof(*uh);
                /* Port 0 and inconsistent uh_ulen are illegal. */
                if (uh->uh_dport == 0 ||
                    pd->off + ntohs(uh->uh_ulen) > pd->tot_len ||
                    ntohs(uh->uh_ulen) < sizeof(struct udphdr)) {
                        REASON_SET(reason, PFRES_SHORT);
                        return (PF_DROP);
                }
                pd->sport = &uh->uh_sport;
                pd->dport = &uh->uh_dport;
                pd->pcksum = &uh->uh_sum;
                break;
        }
        case IPPROTO_ICMP: {
                if (!pf_pull_hdr(pd->m, pd->off, pd->hdr.icmp, ICMP_MINLEN,
                    NULL, reason, pd->af))
                        return (PF_DROP);
                pd->hdrlen = ICMP_MINLEN;
                if (pd->off + pd->hdrlen > pd->tot_len) {
                        REASON_SET(reason, PFRES_SHORT);
                        return (PF_DROP);
                }
                pd->pcksum = &pd->hdr.icmp->icmp_cksum;
                break;
        }
#ifdef INET6
        case IPPROTO_ICMPV6: {
                size_t  icmp_hlen = sizeof(struct icmp6_hdr);

                if (!pf_pull_hdr(pd->m, pd->off, pd->hdr.icmp6, icmp_hlen,
                    NULL, reason, pd->af))
                        return (PF_DROP);
                /* ICMP headers we look further into to match state */
                switch (pd->hdr.icmp6->icmp6_type) {
                case MLD_LISTENER_QUERY:
                case MLD_LISTENER_REPORT:
                        icmp_hlen = sizeof(struct mld_hdr);
                        break;
                case ND_NEIGHBOR_SOLICIT:
                case ND_NEIGHBOR_ADVERT:
                        icmp_hlen = sizeof(struct nd_neighbor_solicit);
                        break;
                }
                /* Re-pull with the larger, type-specific header length. */
                if (icmp_hlen > sizeof(struct icmp6_hdr) &&
                    !pf_pull_hdr(pd->m, pd->off, pd->hdr.icmp6, icmp_hlen,
                    NULL, reason, pd->af))
                        return (PF_DROP);
                pd->hdrlen = icmp_hlen;
                if (pd->off + pd->hdrlen > pd->tot_len) {
                        REASON_SET(reason, PFRES_SHORT);
                        return (PF_DROP);
                }
                break;
        }
#endif /* INET6 */
        }

        /* Seed the (possibly NAT-rewritten) port copies. */
        if (pd->sport)
                pd->osport = pd->nsport = *pd->sport;
        if (pd->dport)
                pd->odport = pd->ndport = *pd->dport;

        return (PF_PASS);
}
/*
 * Update pf statistics after a filtering decision: per-interface
 * byte/packet counters (always), and — when the packet passed, was
 * af-routed, or matched an explicit block rule — the counters of the
 * matching rule 'r', anchor rule 'a', state 's' (including its source
 * nodes and match-rule list), and any PF_ADDR_TABLE address tables
 * referenced by those rules.
 */
void
pf_counters_inc(int action, struct pf_pdesc *pd, struct pf_state *s,
    struct pf_rule *r, struct pf_rule *a)
{
        int dirndx;

        /* Interface counters, indexed by [af][direction][dropped]. */
        pd->kif->pfik_bytes[pd->af == AF_INET6][pd->dir == PF_OUT]
            [action != PF_PASS] += pd->tot_len;
        pd->kif->pfik_packets[pd->af == AF_INET6][pd->dir == PF_OUT]
            [action != PF_PASS]++;

        if (action == PF_PASS || action == PF_AFRT || r->action == PF_DROP) {
                dirndx = (pd->dir == PF_OUT);
                r->packets[dirndx]++;
                r->bytes[dirndx] += pd->tot_len;
                if (a != NULL) {
                        a->packets[dirndx]++;
                        a->bytes[dirndx] += pd->tot_len;
                }
                if (s != NULL) {
                        struct pf_rule_item     *ri;
                        struct pf_sn_item       *sni;

                        SLIST_FOREACH(sni, &s->src_nodes, next) {
                                sni->sn->packets[dirndx]++;
                                sni->sn->bytes[dirndx] += pd->tot_len;
                        }
                        /* State counters index by direction of the state. */
                        dirndx = (pd->dir == s->direction) ? 0 : 1;
                        s->packets[dirndx]++;
                        s->bytes[dirndx] += pd->tot_len;

                        SLIST_FOREACH(ri, &s->match_rules, entry) {
                                ri->r->packets[dirndx]++;
                                ri->r->bytes[dirndx] += pd->tot_len;

                                /*
                                 * Table stats use the state key address that
                                 * corresponds to the rule's src/dst side.
                                 */
                                if (ri->r->src.addr.type == PF_ADDR_TABLE)
                                        pfr_update_stats(ri->r->src.addr.p.tbl,
                                            &s->key[(s->direction == PF_IN)]->
                                                addr[(s->direction == PF_OUT)],
                                            pd, ri->r->action, ri->r->src.neg);
                                if (ri->r->dst.addr.type == PF_ADDR_TABLE)
                                        pfr_update_stats(ri->r->dst.addr.p.tbl,
                                            &s->key[(s->direction == PF_IN)]->
                                                addr[(s->direction == PF_IN)],
                                            pd, ri->r->action, ri->r->dst.neg);
                        }
                }
                if (r->src.addr.type == PF_ADDR_TABLE)
                        pfr_update_stats(r->src.addr.p.tbl,
                            (s == NULL) ? pd->src :
                            &s->key[(s->direction == PF_IN)]->
                                addr[(s->direction == PF_OUT)],
                            pd, r->action, r->src.neg);
                if (r->dst.addr.type == PF_ADDR_TABLE)
                        pfr_update_stats(r->dst.addr.p.tbl,
                            (s == NULL) ? pd->dst :
                            &s->key[(s->direction == PF_IN)]->
                                addr[(s->direction == PF_IN)],
                            pd, r->action, r->dst.neg);
        }
}
  5728. int
  5729. pf_test(sa_family_t af, int fwdir, struct ifnet *ifp, struct mbuf **m0)
  5730. {
  5731. struct pfi_kif *kif;
  5732. u_short action, reason = 0;
  5733. struct pf_rule *a = NULL, *r = &pf_default_rule;
  5734. struct pf_state *s = NULL;
  5735. struct pf_ruleset *ruleset = NULL;
  5736. struct pf_pdesc pd;
  5737. union pf_headers pdhdrs;
  5738. int dir = (fwdir == PF_FWD) ? PF_OUT : fwdir;
  5739. u_int32_t qid, pqid = 0;
  5740. if (!pf_status.running)
  5741. return (PF_PASS);
  5742. if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
  5743. kif = (struct pfi_kif *)ifp->if_carpdev->if_pf_kif;
  5744. else
  5745. kif = (struct pfi_kif *)ifp->if_pf_kif;
  5746. if (kif == NULL) {
  5747. DPFPRINTF(LOG_ERR,
  5748. "pf_test: kif == NULL, if_xname %s", ifp->if_xname);
  5749. return (PF_DROP);
  5750. }
  5751. if (kif->pfik_flags & PFI_IFLAG_SKIP)
  5752. return (PF_PASS);
  5753. #ifdef DIAGNOSTIC
  5754. if (((*m0)->m_flags & M_PKTHDR) == 0)
  5755. panic("non-M_PKTHDR is passed to pf_test");
  5756. #endif /* DIAGNOSTIC */
  5757. if ((*m0)->m_pkthdr.pf.flags & PF_TAG_GENERATED)
  5758. return (PF_PASS);
  5759. if ((*m0)->m_pkthdr.pf.flags & PF_TAG_DIVERTED_PACKET)
  5760. return (PF_PASS);
  5761. if ((*m0)->m_pkthdr.pf.flags & PF_TAG_REFRAGMENTED) {
  5762. (*m0)->m_pkthdr.pf.flags &= ~PF_TAG_REFRAGMENTED;
  5763. return (PF_PASS);
  5764. }
  5765. action = pf_setup_pdesc(&pd, &pdhdrs, af, dir, kif, *m0, &reason);
  5766. if (action != PF_PASS) {
  5767. #if NPFLOG > 0
  5768. pd.pflog |= PF_LOG_FORCE;
  5769. #endif /* NPFLOG > 0 */
  5770. goto done;
  5771. }
  5772. /* packet normalization and reassembly */
  5773. switch (pd.af) {
  5774. case AF_INET:
  5775. action = pf_normalize_ip(&pd, &reason);
  5776. break;
  5777. #ifdef INET6
  5778. case AF_INET6:
  5779. action = pf_normalize_ip6(&pd, &reason);
  5780. break;
  5781. #endif /* INET6 */
  5782. }
  5783. *m0 = pd.m;
  5784. /* if packet sits in reassembly queue, return without error */
  5785. if (pd.m == NULL)
  5786. return PF_PASS;
  5787. if (action != PF_PASS) {
  5788. #if NPFLOG > 0
  5789. pd.pflog |= PF_LOG_FORCE;
  5790. #endif /* NPFLOG > 0 */
  5791. goto done;
  5792. }
  5793. /* if packet has been reassembled, update packet description */
  5794. if (pf_status.reass && pd.virtual_proto == PF_VPROTO_FRAGMENT) {
  5795. action = pf_setup_pdesc(&pd, &pdhdrs, af, dir, kif, *m0,
  5796. &reason);
  5797. if (action != PF_PASS) {
  5798. #if NPFLOG > 0
  5799. pd.pflog |= PF_LOG_FORCE;
  5800. #endif /* NPFLOG > 0 */
  5801. goto done;
  5802. }
  5803. }
  5804. pd.m->m_pkthdr.pf.flags |= PF_TAG_PROCESSED;
  5805. switch (pd.virtual_proto) {
  5806. case PF_VPROTO_FRAGMENT: {
  5807. /*
  5808. * handle fragments that aren't reassembled by
  5809. * normalization
  5810. */
  5811. action = pf_test_rule(&pd, &r, &s, &a, &ruleset);
  5812. if (action != PF_PASS)
  5813. REASON_SET(&reason, PFRES_FRAG);
  5814. break;
  5815. }
  5816. case IPPROTO_ICMP: {
  5817. if (pd.af != AF_INET) {
  5818. action = PF_DROP;
  5819. REASON_SET(&reason, PFRES_NORM);
  5820. DPFPRINTF(LOG_NOTICE,
  5821. "dropping IPv6 packet with ICMPv4 payload");
  5822. goto done;
  5823. }
  5824. action = pf_test_state_icmp(&pd, &s, &reason);
  5825. if (action == PF_PASS || action == PF_AFRT) {
  5826. #if NPFSYNC > 0
  5827. pfsync_update_state(s);
  5828. #endif /* NPFSYNC > 0 */
  5829. r = s->rule.ptr;
  5830. a = s->anchor.ptr;
  5831. #if NPFLOG > 0
  5832. pd.pflog |= s->log;
  5833. #endif /* NPFLOG > 0 */
  5834. } else if (s == NULL)
  5835. action = pf_test_rule(&pd, &r, &s, &a, &ruleset);
  5836. break;
  5837. }
  5838. #ifdef INET6
  5839. case IPPROTO_ICMPV6: {
  5840. if (pd.af != AF_INET6) {
  5841. action = PF_DROP;
  5842. REASON_SET(&reason, PFRES_NORM);
  5843. DPFPRINTF(LOG_NOTICE,
  5844. "dropping IPv4 packet with ICMPv6 payload");
  5845. goto done;
  5846. }
  5847. action = pf_test_state_icmp(&pd, &s, &reason);
  5848. if (action == PF_PASS || action == PF_AFRT) {
  5849. #if NPFSYNC > 0
  5850. pfsync_update_state(s);
  5851. #endif /* NPFSYNC > 0 */
  5852. r = s->rule.ptr;
  5853. a = s->anchor.ptr;
  5854. #if NPFLOG > 0
  5855. pd.pflog |= s->log;
  5856. #endif /* NPFLOG > 0 */
  5857. } else if (s == NULL)
  5858. action = pf_test_rule(&pd, &r, &s, &a, &ruleset);
  5859. break;
  5860. }
  5861. #endif /* INET6 */
  5862. default:
  5863. if (pd.virtual_proto == IPPROTO_TCP) {
  5864. if ((pd.hdr.tcp->th_flags & TH_ACK) && pd.p_len == 0)
  5865. pqid = 1;
  5866. action = pf_normalize_tcp(&pd);
  5867. if (action == PF_DROP)
  5868. goto done;
  5869. }
  5870. action = pf_test_state(&pd, &s, &reason);
  5871. if (action == PF_PASS || action == PF_AFRT) {
  5872. #if NPFSYNC > 0
  5873. pfsync_update_state(s);
  5874. #endif /* NPFSYNC > 0 */
  5875. r = s->rule.ptr;
  5876. a = s->anchor.ptr;
  5877. #if NPFLOG > 0
  5878. pd.pflog |= s->log;
  5879. #endif /* NPFLOG > 0 */
  5880. } else if (s == NULL)
  5881. action = pf_test_rule(&pd, &r, &s, &a, &ruleset);
  5882. if (pd.virtual_proto == IPPROTO_TCP) {
  5883. if (s) {
  5884. if (s->max_mss)
  5885. pf_normalize_mss(&pd, s->max_mss);
  5886. } else if (r->max_mss)
  5887. pf_normalize_mss(&pd, r->max_mss);
  5888. }
  5889. break;
  5890. }
  5891. done:
  5892. if (action != PF_DROP) {
  5893. if (s) {
  5894. /* The non-state case is handled in pf_test_rule() */
  5895. if (action == PF_PASS && pd.badopts &&
  5896. !(s->state_flags & PFSTATE_ALLOWOPTS)) {
  5897. action = PF_DROP;
  5898. REASON_SET(&reason, PFRES_IPOPTIONS);
  5899. #if NPFLOG > 0
  5900. pd.pflog |= PF_LOG_FORCE;
  5901. #endif /* NPFLOG > 0 */
  5902. DPFPRINTF(LOG_NOTICE, "dropping packet with "
  5903. "ip/ipv6 options in pf_test()");
  5904. }
  5905. pf_scrub(pd.m, s->state_flags, pd.af, s->min_ttl,
  5906. s->set_tos);
  5907. pf_tag_packet(pd.m, s->tag, s->rtableid[pd.didx]);
  5908. if (pqid || (pd.tos & IPTOS_LOWDELAY)) {
  5909. qid = s->pqid;
  5910. if (s->state_flags & PFSTATE_SETPRIO)
  5911. pd.m->m_pkthdr.pf.prio = s->set_prio[1];
  5912. } else {
  5913. qid = s->qid;
  5914. if (s->state_flags & PFSTATE_SETPRIO)
  5915. pd.m->m_pkthdr.pf.prio = s->set_prio[0];
  5916. }
  5917. } else {
  5918. pf_scrub(pd.m, r->scrub_flags, pd.af, r->min_ttl,
  5919. r->set_tos);
  5920. if (pqid || (pd.tos & IPTOS_LOWDELAY)) {
  5921. qid = r->pqid;
  5922. if (r->scrub_flags & PFSTATE_SETPRIO)
  5923. pd.m->m_pkthdr.pf.prio = r->set_prio[1];
  5924. } else {
  5925. qid = r->qid;
  5926. if (r->scrub_flags & PFSTATE_SETPRIO)
  5927. pd.m->m_pkthdr.pf.prio = r->set_prio[0];
  5928. }
  5929. }
  5930. }
  5931. if (action == PF_PASS && qid)
  5932. pd.m->m_pkthdr.pf.qid = qid;
  5933. if (pd.dir == PF_IN && s && s->key[PF_SK_STACK])
  5934. pd.m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
  5935. if (pd.dir == PF_OUT &&
  5936. pd.m->m_pkthdr.pf.inp && !pd.m->m_pkthdr.pf.inp->inp_pf_sk &&
  5937. s && s->key[PF_SK_STACK] && !s->key[PF_SK_STACK]->inp) {
  5938. pd.m->m_pkthdr.pf.inp->inp_pf_sk = s->key[PF_SK_STACK];
  5939. s->key[PF_SK_STACK]->inp = pd.m->m_pkthdr.pf.inp;
  5940. }
  5941. /*
  5942. * connections redirected to loopback should not match sockets
  5943. * bound specifically to loopback due to security implications,
  5944. * see tcp_input() and in_pcblookup_listen().
  5945. */
  5946. if (pd.destchg)
  5947. if ((pd.af == AF_INET && (ntohl(pd.dst->v4.s_addr) >>
  5948. IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) ||
  5949. (pd.af == AF_INET6 && IN6_IS_ADDR_LOOPBACK(&pd.dst->v6)))
  5950. pd.m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
  5951. /* We need to redo the route lookup on outgoing routes. */
  5952. if (pd.destchg && pd.dir == PF_OUT)
  5953. pd.m->m_pkthdr.pf.flags |= PF_TAG_REROUTE;
  5954. if (pd.dir == PF_IN && action == PF_PASS && r->divert.port) {
  5955. struct pf_divert *divert;
  5956. if ((divert = pf_get_divert(pd.m))) {
  5957. pd.m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
  5958. divert->port = r->divert.port;
  5959. divert->rdomain = pd.rdomain;
  5960. divert->addr = r->divert.addr;
  5961. }
  5962. }
  5963. if (action == PF_PASS && r->divert_packet.port)
  5964. action = PF_DIVERT;
  5965. #if NPFLOG > 0
  5966. if (pd.pflog) {
  5967. struct pf_rule_item *ri;
  5968. if (pd.pflog & PF_LOG_FORCE || r->log & PF_LOG_ALL)
  5969. PFLOG_PACKET(&pd, reason, r, a, ruleset, NULL);
  5970. if (s) {
  5971. SLIST_FOREACH(ri, &s->match_rules, entry)
  5972. if (ri->r->log & PF_LOG_ALL)
  5973. PFLOG_PACKET(&pd, reason, ri->r, a,
  5974. ruleset, NULL);
  5975. }
  5976. }
  5977. #endif /* NPFLOG > 0 */
  5978. pf_counters_inc(action, &pd, s, r, a);
  5979. switch (action) {
  5980. case PF_SYNPROXY_DROP:
  5981. m_freem(*m0);
  5982. case PF_DEFER:
  5983. *m0 = NULL;
  5984. action = PF_PASS;
  5985. break;
  5986. case PF_DIVERT:
  5987. switch (pd.af) {
  5988. case AF_INET:
  5989. if (!divert_packet(pd.m, pd.dir, r->divert_packet.port))
  5990. *m0 = NULL;
  5991. break;
  5992. #ifdef INET6
  5993. case AF_INET6:
  5994. if (!divert6_packet(pd.m, pd.dir,
  5995. r->divert_packet.port))
  5996. *m0 = NULL;
  5997. break;
  5998. #endif /* INET6 */
  5999. }
  6000. action = PF_PASS;
  6001. break;
  6002. #ifdef INET6
  6003. case PF_AFRT:
  6004. if (pf_translate_af(&pd)) {
  6005. if (!pd.m)
  6006. *m0 = NULL;
  6007. action = PF_DROP;
  6008. break;
  6009. }
  6010. if (pd.naf == AF_INET)
  6011. pf_route(&pd.m, r, dir, kif->pfik_ifp, s);
  6012. if (pd.naf == AF_INET6)
  6013. pf_route6(&pd.m, r, dir, kif->pfik_ifp, s);
  6014. *m0 = NULL;
  6015. action = PF_PASS;
  6016. break;
  6017. #endif /* INET6 */
  6018. default:
  6019. /* pf_route can free the mbuf causing *m0 to become NULL */
  6020. if (r->rt) {
  6021. switch (pd.af) {
  6022. case AF_INET:
  6023. pf_route(m0, r, pd.dir, pd.kif->pfik_ifp, s);
  6024. break;
  6025. #ifdef INET6
  6026. case AF_INET6:
  6027. pf_route6(m0, r, pd.dir, pd.kif->pfik_ifp, s);
  6028. break;
  6029. #endif /* INET6 */
  6030. }
  6031. }
  6032. break;
  6033. }
  6034. #ifdef INET6
  6035. /* if reassembled packet passed, create new fragments */
  6036. if (pf_status.reass && action == PF_PASS && *m0 && fwdir == PF_FWD) {
  6037. struct m_tag *mtag;
  6038. if ((mtag = m_tag_find(*m0, PACKET_TAG_PF_REASSEMBLED, NULL)))
  6039. action = pf_refragment6(m0, mtag);
  6040. }
  6041. #endif /* INET6 */
  6042. if (s && action != PF_DROP) {
  6043. if (!s->if_index_in && dir == PF_IN)
  6044. s->if_index_in = ifp->if_index;
  6045. else if (!s->if_index_out && dir == PF_OUT)
  6046. s->if_index_out = ifp->if_index;
  6047. }
  6048. return (action);
  6049. }
  6050. void
  6051. pf_cksum(struct pf_pdesc *pd, struct mbuf *m)
  6052. {
  6053. if (pd->csum_status != PF_CSUM_OK)
  6054. return; /* don't fix broken cksums */
  6055. switch (pd->proto) {
  6056. case IPPROTO_TCP:
  6057. pd->hdr.tcp->th_sum = 0;
  6058. m->m_pkthdr.csum_flags |= M_TCP_CSUM_OUT;
  6059. break;
  6060. case IPPROTO_UDP:
  6061. pd->hdr.udp->uh_sum = 0;
  6062. m->m_pkthdr.csum_flags |= M_UDP_CSUM_OUT;
  6063. break;
  6064. case IPPROTO_ICMP:
  6065. pd->hdr.icmp->icmp_cksum = 0;
  6066. m->m_pkthdr.csum_flags |= M_ICMP_CSUM_OUT;
  6067. break;
  6068. #ifdef INET6
  6069. case IPPROTO_ICMPV6:
  6070. pd->hdr.icmp6->icmp6_cksum = 0;
  6071. m->m_pkthdr.csum_flags |= M_ICMP_CSUM_OUT;
  6072. break;
  6073. #endif /* INET6 */
  6074. default:
  6075. /* nothing */
  6076. break;
  6077. }
  6078. }
  6079. /*
  6080. * must be called whenever any addressing information such as
  6081. * address, port, protocol has changed
  6082. */
  6083. void
  6084. pf_pkt_addr_changed(struct mbuf *m)
  6085. {
  6086. m->m_pkthdr.pf.statekey = NULL;
  6087. m->m_pkthdr.pf.inp = NULL;
  6088. }
  6089. #if NPFLOG > 0
  6090. void
  6091. pf_log_matches(struct pf_pdesc *pd, struct pf_rule *rm, struct pf_rule *am,
  6092. struct pf_ruleset *ruleset, struct pf_rule_slist *matchrules)
  6093. {
  6094. struct pf_rule_item *ri;
  6095. /* if this is the log(matches) rule, packet has been logged already */
  6096. if (rm->log & PF_LOG_MATCHES)
  6097. return;
  6098. SLIST_FOREACH(ri, matchrules, entry)
  6099. if (ri->r->log & PF_LOG_MATCHES)
  6100. PFLOG_PACKET(pd, PFRES_MATCH, rm, am, ruleset, ri->r);
  6101. }
  6102. #endif /* NPFLOG > 0 */