/*	$OpenBSD: pf_table.c,v 1.113 2015/07/20 18:42:08 jsg Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/ip_ipsp.h>

#include <net/pfvar.h>

#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)
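
/*
 * COPYIN/COPYOUT cross the user/kernel boundary with copyin(9)/copyout(9)
 * only when the request comes from a user ioctl (PFR_FLAG_USERIOCTL);
 * for kernel-internal callers the buffers are already kernel memory and
 * a plain bcopy() suffices.
 */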
#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (to), (size)), 0))
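
/*
 * YIELD gives up the CPU on every 1024th iteration of the long loops
 * below, but only when "ok" is set, i.e. when the caller is a user
 * ioctl that is allowed to sleep.
 */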
#define YIELD(cnt, ok)				\
	do {					\
		if ((cnt % 1024 == 1023) &&	\
		    (ok))			\
			yield();		\
	} while (0)

#define FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)
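
/*
 * A pfr_walktree describes one traversal of a table's radix trees:
 * pfrw_op selects what pfr_walktree() does at each node (mark, sweep,
 * enqueue, copy out, ...) and the pfrw_1 union carries the cursor or
 * work queue specific to that operation.
 */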
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl[PFRKE_MAX];
struct pool		 pfr_kcounters_pl;
struct sockaddr_in	 pfr_sin;
#ifdef INET6
struct sockaddr_in6	 pfr_sin6;
#endif	/* INET6 */
union sockaddr_union	 pfr_mask;
struct pf_addr		 pfr_ffaddr;

int			 pfr_gcd(int, int);
void			 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
int			 pfr_validate_addr(struct pfr_addr *);
void			 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
void			 pfr_mark_addrs(struct pfr_ktable *);
struct pfr_kentry	*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
struct pfr_kentry	*pfr_create_kentry(struct pfr_addr *);
void			 pfr_destroy_kentries(struct pfr_kentryworkq *);
void			 pfr_destroy_kentry(struct pfr_kentry *);
void			 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, time_t);
void			 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
void			 pfr_clstats_kentries(struct pfr_kentryworkq *, time_t,
			    int);
void			 pfr_reset_feedback(struct pfr_addr *, int, int);
void			 pfr_prepare_network(union sockaddr_union *, int, int);
int			 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
int			 pfr_walktree(struct radix_node *, void *, u_int);
int			 pfr_validate_table(struct pfr_table *, int, int);
int			 pfr_fix_anchor(char *);
void			 pfr_commit_ktable(struct pfr_ktable *, time_t);
void			 pfr_insert_ktables(struct pfr_ktableworkq *);
void			 pfr_insert_ktable(struct pfr_ktable *);
void			 pfr_setflags_ktables(struct pfr_ktableworkq *);
void			 pfr_setflags_ktable(struct pfr_ktable *, int);
void			 pfr_clstats_ktables(struct pfr_ktableworkq *, time_t,
			    int);
void			 pfr_clstats_ktable(struct pfr_ktable *, time_t, int);
struct pfr_ktable	*pfr_create_ktable(struct pfr_table *, time_t, int,
			    int);
void			 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
void			 pfr_destroy_ktable(struct pfr_ktable *, int);
int			 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
void			 pfr_ktable_winfo_update(struct pfr_ktable *,
			    struct pfr_kentry *);
struct pfr_ktable	*pfr_lookup_table(struct pfr_table *);
void			 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
int			 pfr_table_count(struct pfr_table *, int);
int			 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
struct pfr_kentry	*pfr_kentry_byidx(struct pfr_ktable *, int, int);
int			 pfr_islinklocal(sa_family_t, struct pf_addr *);

RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;
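
/*
 * Euclid's algorithm, e.g. pfr_gcd(60, 48) == 12. It is used (via
 * pfr_ktable_winfo_update()) to maintain the greatest common divisor
 * of the weights of a table's entries for weighted load balancing.
 */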
int
pfr_gcd(int m, int n)
{
	int t;

	while (m > 0) {
		t = n % m;
		n = m;
		m = t;
	}
	return (n);
}

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_PLAIN], sizeof(struct pfr_kentry),
	    0, 0, 0, "pfrke_plain", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_ROUTE], sizeof(struct pfr_kentry_route),
	    0, 0, 0, "pfrke_route", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_COST], sizeof(struct pfr_kentry_cost),
	    0, 0, 0, "pfrke_cost", NULL);
	pool_init(&pfr_kcounters_pl, sizeof(struct pfr_kcounters),
	    0, 0, 0, "pfrkcounters", NULL);

	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
#ifdef INET6
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;
#endif	/* INET6 */

	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		if (kt->pfrkt_cnt) {
			DPFPRINTF(LOG_NOTICE,
			    "pfr_clr_addrs: corruption detected (%d).",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
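
/*
 * pfr_add_addrs() routes each new address into a throwaway table
 * (tmpkt) first, so duplicates within the submitted batch are caught
 * before anything is inserted into the real table.
 */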
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    !(flags & PFR_FLAG_USERIOCTL));
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if ((p->pfrke_flags & PFRKE_FLAG_NOT) !=
			    ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &workq, tzero);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here,
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * One is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
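	/*
	 * For example (numbers for illustration only): with N = 65536
	 * entries the loop below yields log = 18, so the O(N)
	 * full-table scan is preferred only when more than
	 * 65536/18 ~ 3640 addresses are to be deleted.
	 */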
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			YIELD(i, flags & PFR_FLAG_USERIOCTL);
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_flags &= ~PFRKE_FLAG_MARK;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if ((p->pfrke_flags & PFRKE_FLAG_NOT) !=
			    ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_flags & PFRKE_FLAG_MARK)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL &&
		    (p->pfrke_flags & PFRKE_FLAG_NOT) == ad.pfra_not &&
		    !(p->pfrke_flags & PFRKE_FLAG_MARK)) {
			p->pfrke_flags |= PFRKE_FLAG_MARK;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    !(flags & PFR_FLAG_USERIOCTL));
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_flags & PFRKE_FLAG_MARK) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_flags |= PFRKE_FLAG_MARK;
			if ((p->pfrke_flags & PFRKE_FLAG_NOT) != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
			if (p->pfrke_type == PFRKE_COST)
				kt->pfrkt_refcntcost++;
			pfr_ktable_winfo_update(kt, p);
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    ((p->pfrke_flags & PFRKE_FLAG_NOT) ?
		    PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !(p->pfrke_flags & PFRKE_FLAG_NOT))
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_addrs: corruption detected (%d)", w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	time_t			 tzero = time_second;

	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_astats: corruption detected (%d)", w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_kentries(&workq, time_second, 0);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
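
/*
 * pfr_validate_addr() sanity-checks a pfr_addr coming from userland:
 * a known address family, a prefix length within that family's bounds,
 * no stray bits set past the prefix, and no feedback value preset.
 */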
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif	/* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			DPFPRINTF(LOG_ERR,
			    "pfr_enqueue_addrs: IPv4 walktree failed.");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			DPFPRINTF(LOG_ERR,
			    "pfr_enqueue_addrs: IPv6 walktree failed.");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		DPFPRINTF(LOG_ERR,
		    "pfr_mark_addrs: IPv4 walktree failed.");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		DPFPRINTF(LOG_ERR,
		    "pfr_mark_addrs: IPv6 walktree failed.");
}
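
/*
 * pfr_lookup_addr() looks an address up in the table's radix tree:
 * network entries via rn_lookup() with an explicit mask, host entries
 * via rn_match(). With "exact" set, a host query must not resolve to
 * a network entry.
 */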
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;
	int			 s;

	bzero(&sa, sizeof(sa));
	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
		break;
#endif	/* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		s = splsoftnet(); /* rn_lookup makes use of globals */
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		splx(s);
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry_all	*ke;

	ke = pool_get(&pfr_kentry_pl[ad->pfra_type], PR_NOWAIT | PR_ZERO);
	if (ke == NULL)
		return (NULL);

	ke->pfrke_type = ad->pfra_type;

	/* set weight allowing implicit weights */
	if (ad->pfra_weight == 0)
		ad->pfra_weight = 1;

	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_COST:
		((struct pfr_kentry_cost *)ke)->weight = ad->pfra_weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		if (ad->pfra_ifname[0])
			ke->pfrke_rkif = pfi_kif_get(ad->pfra_ifname);
		if (ke->pfrke_rkif)
			pfi_kif_ref(ke->pfrke_rkif, PFI_KIF_REF_ROUTE);
		break;
	default:
		panic("unknown pfrke_type %d", ke->pfrke_type);
		break;
	}

	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
		break;
#ifdef INET6
	case AF_INET6:
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
		break;
#endif	/* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	if (ad->pfra_not)
		ke->pfrke_flags |= PFRKE_FLAG_NOT;
	return ((struct pfr_kentry *)ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;
	int			 i;

	for (i = 0, p = SLIST_FIRST(workq); p != NULL; i++, p = q) {
		YIELD(i, 1);
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
	if (ke->pfrke_type == PFRKE_COST || ke->pfrke_type == PFRKE_ROUTE)
		pfi_kif_unref(((struct pfr_kentry_all *)ke)->pfrke_rkif,
		    PFI_KIF_REF_ROUTE);
	pool_put(&pfr_kentry_pl[ke->pfrke_type], ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			DPFPRINTF(LOG_ERR,
			    "pfr_insert_kentries: cannot route entry "
			    "(code=%d).", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		++n;
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost++;
		pfr_ktable_winfo_update(kt, p);
		YIELD(n, 1);
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	if (p->pfrke_type == PFRKE_COST)
		kt->pfrkt_refcntcost++;
	kt->pfrkt_cnt++;
	pfr_ktable_winfo_update(kt, p);

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	struct pfr_kentryworkq	 addrq;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		++n;
		YIELD(n, 1);
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost--;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);

	/* update maxweight and gcd for load balancing */
	if (kt->pfrkt_refcntcost > 0) {
		kt->pfrkt_gcdweight = 0;
		kt->pfrkt_maxweight = 1;
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		SLIST_FOREACH(p, &addrq, pfrke_workq)
			pfr_ktable_winfo_update(kt, p);
	}
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
	}
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, time_t tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 s;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		s = splsoftnet();
		if (negchange)
			p->pfrke_flags ^= PFRKE_FLAG_NOT;
		if (p->pfrke_counters) {
			pool_put(&pfr_kcounters_pl, p->pfrke_counters);
			p->pfrke_counters = NULL;
		}
		splx(s);
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			break;
	}
}
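
/*
 * pfr_prepare_network() turns a prefix length into a netmask sockaddr.
 * For example, with AF_INET and net = 20, htonl(-1 << (32-20)) yields
 * the mask 0xfffff000, i.e. 255.255.240.0.
 */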
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
#ifdef INET6
	int	i;
#endif	/* INET6 */

	bzero(sa, sizeof(*sa));
	switch (af) {
	case AF_INET:
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
		break;
#ifdef INET6
	case AF_INET6:
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
		break;
#endif	/* INET6 */
	default:
		unhandled_af(af);
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	switch (ke->pfrke_af) {
	case AF_INET:
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		head = kt->pfrkt_ip6;
		break;
#endif	/* INET6 */
	default:
		unhandled_af(ke->pfrke_af);
	}

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node, 0);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node, 0);
	splx(s);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;
	int			 s;

	switch (ke->pfrke_af) {
	case AF_INET:
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		head = kt->pfrkt_ip6;
		break;
#endif	/* INET6 */
	default:
		unhandled_af(ke->pfrke_af);
	}

	s = splsoftnet();
	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
	splx(s);

	if (rn == NULL) {
		DPFPRINTF(LOG_ERR, "pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_type = ke->pfrke_type;
	if (ke->pfrke_flags & PFRKE_FLAG_NOT)
		ad->pfra_not = 1;

	switch (ad->pfra_af) {
	case AF_INET:
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
		break;
#ifdef INET6
	case AF_INET6:
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
		break;
#endif	/* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	if (ke->pfrke_counters != NULL)
		ad->pfra_states = ke->pfrke_counters->states;
	switch (ke->pfrke_type) {
	case PFRKE_COST:
		ad->pfra_weight = ((struct pfr_kentry_cost *)ke)->weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		if (((struct pfr_kentry_route *)ke)->kif != NULL)
			strlcpy(ad->pfra_ifname,
			    ((struct pfr_kentry_route *)ke)->kif->pfik_name,
			    IFNAMSIZ);
		break;
	default:
		break;
	}
}
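
/*
 * pfr_walktree() is the callback handed to rn_walktree(); it performs
 * the operation selected by pfrw_op on every entry of a table.
 */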
int
pfr_walktree(struct radix_node *rn, void *arg, u_int id)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 s, flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_flags &= ~PFRKE_FLAG_MARK;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_flags & PFRKE_FLAG_MARK)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			s = splsoftnet();
			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets,
				    sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			splx(s);
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_flags & PFRKE_FLAG_NOT)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		switch (ke->pfrke_af) {
		case AF_INET:
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
			break;
#ifdef INET6
		case AF_INET6:
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
			break;
#endif	/* INET6 */
		default:
			unhandled_af(ke->pfrke_af);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_tables: corruption detected (%d).", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 s, n, nn;
	time_t			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		s = splsoftnet();
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags)) {
			splx(s);
			return (EFAULT);
		}
		splx(s);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (n) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_tstats: corruption detected (%d).", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
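
/*
 * pfr_ina_begin() and pfr_ina_define() implement the first half of a
 * two-phase table load: begin flushes the anchor's previous inactive
 * tables and hands out a ticket, and define then builds shadow tables
 * under that ticket, to be swapped in when the transaction commits
 * (see pfr_commit_ktable()).
 */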
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
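
/*
 * Define one table, and optionally its addresses, inside an open
 * transaction.  The addresses are loaded into a detached shadow table
 * hung off the kernel table; pfr_ina_commit() later folds the shadow
 * into the active table.  A table inside an anchor also gets a root
 * table in the main ruleset, created here if necessary.
 */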
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1,
		    !(flags & PFR_FLAG_USERIOCTL));
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1,
		    !(flags & PFR_FLAG_USERIOCTL));
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0, !(flags & PFR_FLAG_USERIOCTL));
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		YIELD(i, flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost++;
		pfr_ktable_winfo_update(kt, p);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
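
/*
 * Abort an open transaction: strip INACTIVE from all affected tables,
 * which also destroys their shadow tables, and close the ruleset.
 * A stale ticket is not an error here; there is simply nothing left
 * to roll back.
 */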
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
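
/*
 * Commit an open transaction: every INACTIVE table in the anchor is
 * handed to pfr_commit_ktable(), which folds its shadow into the
 * active table.  *nadd counts newly activated tables, *nchange
 * replaced ones.
 */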
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;
	return (0);
}
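
/*
 * Fold a shadow table into its kernel table.  Three cases: the shadow
 * carries no addresses (only the statistics are reset), the table is
 * already active (both address sets are merged entry by entry so the
 * counters of unchanged entries survive), or the table is not yet
 * active (the radix heads and counts are simply swapped).
 */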
void
pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if ((q->pfrke_flags & PFRKE_FLAG_NOT) !=
				    (p->pfrke_flags & PFRKE_FLAG_NOT))
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_flags |= PFRKE_FLAG_MARK;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove leading slashes
 * and check them for validity.
 */
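/*
 * For example, an anchor given as "///foo/bar" is rewritten in place
 * to "foo/bar".  A name that is not NUL-terminated within MAXPATHLEN,
 * or that carries stray bytes after its terminator, is rejected.
 */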
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable *p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable *p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}
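
/*
 * Apply a new flag word to a table.  A table that is no longer
 * referenced by rules or anchors and is not persistent loses ACTIVE;
 * a table with no flags left in PFR_TFLAG_SETMASK is removed from the
 * tree and destroyed.  Dropping ACTIVE flushes the addresses, dropping
 * INACTIVE destroys an attached shadow table.
 */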
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_REFDANCHOR) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
{
	struct pfr_ktable *p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 s;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	s = splsoftnet();
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	splx(s);
	kt->pfrkt_tzero = tzero;
}
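
/*
 * Allocate and initialize a kernel table, optionally attaching it to
 * its ruleset.  "intr" selects a non-sleeping pool allocation
 * (PR_NOWAIT) for callers that must not sleep.
 */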
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset,
    int intr)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	if (intr)
		kt = pool_get(&pfr_ktable_pl, PR_NOWAIT|PR_ZERO|PR_LIMITFAIL);
	else
		kt = pool_get(&pfr_ktable_pl, PR_WAITOK|PR_ZERO|PR_LIMITFAIL);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;
	kt->pfrkt_refcntcost = 0;
	kt->pfrkt_gcdweight = 0;
	kt->pfrkt_maxweight = 1;
	return (kt);
}

void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable *p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE, 0);
	if (kt->pfrkt_ip6 != NULL)
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE, 0);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}
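
/*
 * Match an address against a table on behalf of the filter: returns 1
 * on a positive match (an entry exists and is not negated) and updates
 * the table's match/nomatch counters.  A table that is not active
 * falls back to its root table, if any.
 */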
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		break;
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}
	match = (ke && !(ke->pfrke_flags & PFRKE_FLAG_NOT));
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
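
/*
 * Account packet and byte counters for a table lookup done by the
 * filter.  If the lookup result contradicts the rule's expectation
 * ("notrule"), the traffic is booked under PFR_OP_XPASS instead.
 * Per-entry counters are allocated lazily when the table has
 * PFR_TFLAG_COUNTERS set.
 */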
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, struct pf_pdesc *pd,
    int op, int notrule)
{
	struct pfr_kentry	*ke = NULL;
	sa_family_t		 af = pd->af;
	u_int64_t		 len = pd->tot_len;
	int			 dir_idx = (pd->dir == PF_OUT);
	int			 op_idx;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		break;
#ifdef INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}

	switch (op) {
	case PF_PASS:
		op_idx = PFR_OP_PASS;
		break;
	case PF_MATCH:
		op_idx = PFR_OP_MATCH;
		break;
	case PF_DROP:
		op_idx = PFR_OP_BLOCK;
		break;
	default:
		panic("unhandled op");
	}

	if ((ke == NULL || (ke->pfrke_flags & PFRKE_FLAG_NOT)) != notrule) {
		if (op_idx != PFR_OP_PASS)
			DPFPRINTF(LOG_DEBUG,
			    "pfr_update_stats: assertion failed.");
		op_idx = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_idx][op_idx]++;
	kt->pfrkt_bytes[dir_idx][op_idx] += len;
	if (ke != NULL && op_idx != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
			    PR_NOWAIT | PR_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_idx][op_idx]++;
			ke->pfrke_counters->pfrkc_bytes[dir_idx][op_idx] += len;
		}
	}
}
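
/*
 * Attach a table to a rule: look the table up, creating it (and, for a
 * table inside an anchor, its root table) if necessary, then take a
 * rule reference; the first reference sets PFR_TFLAG_REFERENCED.
 */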
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name, int intr)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1, intr);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1, intr);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		DPFPRINTF(LOG_NOTICE, "pfr_detach_table: refcount = %d.",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

int
pfr_islinklocal(sa_family_t af, struct pf_addr *addr)
{
#ifdef INET6
	if (af == AF_INET6 && IN6_IS_ADDR_LINKLOCAL(&addr->v6))
		return (1);
#endif /* INET6 */
	return (0);
}
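
/*
 * Pick the next address from a table- or interface-backed pool,
 * implementing (weighted) round-robin.  rpool->tblidx and
 * rpool->counter carry the iteration state between calls; nested
 * networks are skipped by stepping the counter past them.  Returns 0
 * on success and 1 if no usable address was found.
 */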
int
pfr_pool_get(struct pf_pool *rpool, struct pf_addr **raddr,
    struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr, *counter;
	union sockaddr_union	 mask;
	int			 startidx, idx = -1, loop = 0, use_counter = 0;

	switch (af) {
	case AF_INET:
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
		break;
#ifdef INET6
	case AF_INET6:
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}

	if (rpool->addr.type == PF_ADDR_TABLE)
		kt = rpool->addr.p.tbl;
	else if (rpool->addr.type == PF_ADDR_DYNIFTL)
		kt = rpool->addr.p.dyn->pfid_kt;
	else
		return (-1);
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	counter = &rpool->counter;
	idx = rpool->tblidx;
	if (idx < 0 || idx >= kt->pfrkt_cnt)
		idx = 0;
	else
		use_counter = 1;
	startidx = idx;

_next_block:
	if (loop && startidx == idx) {
		kt->pfrkt_nomatch++;
		return (1);
	}

	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		/* we don't have this idx, try looping */
		if (loop || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
			kt->pfrkt_nomatch++;
			return (1);
		}
		idx = 0;
		loop++;
	}

	/* Get current weight for weighted round-robin */
	if (idx == 0 && use_counter == 1 && kt->pfrkt_refcntcost > 0) {
		rpool->curweight = rpool->curweight - kt->pfrkt_gcdweight;
		if (rpool->curweight < 1)
			rpool->curweight = kt->pfrkt_maxweight;
	}

	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter && !PF_AZERO(counter, af)) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		if (rpool->addr.type == PF_ADDR_DYNIFTL &&
		    pfr_islinklocal(af, addr)) {
			idx++;
			goto _next_block;
		}
		PF_ACPY(counter, addr, af);
		rpool->tblidx = idx;
		kt->pfrkt_match++;
		rpool->states = 0;
		if (ke->pfrke_counters != NULL)
			rpool->states = ke->pfrke_counters->states;
		switch (ke->pfrke_type) {
		case PFRKE_COST:
			rpool->weight = ((struct pfr_kentry_cost *)ke)->weight;
			/* FALLTHROUGH */
		case PFRKE_ROUTE:
			rpool->kif = ((struct pfr_kentry_route *)ke)->kif;
			break;
		default:
			rpool->weight = 1;
			break;
		}
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
			break;
#ifdef INET6
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
			break;
#endif /* INET6 */
		default:
			unhandled_af(af);
		}
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			if (rpool->addr.type == PF_ADDR_DYNIFTL &&
			    pfr_islinklocal(af, addr))
				goto _next_entry;
			PF_ACPY(counter, addr, af);
			rpool->tblidx = idx;
			kt->pfrkt_match++;
			rpool->states = 0;
			if (ke->pfrke_counters != NULL)
				rpool->states = ke->pfrke_counters->states;
			switch (ke->pfrke_type) {
			case PFRKE_COST:
				rpool->weight =
				    ((struct pfr_kentry_cost *)ke)->weight;
				/* FALLTHROUGH */
			case PFRKE_ROUTE:
				rpool->kif = ((struct pfr_kentry_route *)ke)->kif;
				break;
			default:
				rpool->weight = 1;
				break;
			}
			return (0);
		}
_next_entry:
		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

/* Added for load balancing state counter use. */
int
pfr_states_increase(struct pfr_ktable *kt, struct pf_addr *addr, int af)
{
	struct pfr_kentry *ke;

	ke = pfr_kentry_byaddr(kt, addr, af, 1);
	if (ke == NULL)
		return (-1);

	if (ke->pfrke_counters == NULL)
		ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
		    PR_NOWAIT | PR_ZERO);
	if (ke->pfrke_counters == NULL)
		return (-1);

	ke->pfrke_counters->states++;
	return (ke->pfrke_counters->states);
}

/* Added for load balancing state counter use. */
int
pfr_states_decrease(struct pfr_ktable *kt, struct pf_addr *addr, int af)
{
	struct pfr_kentry *ke;

	ke = pfr_kentry_byaddr(kt, addr, af, 1);
	if (ke == NULL)
		return (-1);

	if (ke->pfrke_counters == NULL)
		ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
		    PR_NOWAIT | PR_ZERO);
	if (ke->pfrke_counters == NULL)
		return (-1);

	if (ke->pfrke_counters->states > 0)
		ke->pfrke_counters->states--;
	else
		DPFPRINTF(LOG_DEBUG,
		    "pfr_states_decrease: states-- when states <= 0");
	return (ke->pfrke_counters->states);
}
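
#if 0
/*
 * Usage sketch (illustrative only, kept out of the build): a load
 * balancing path that binds a state to an address picked from a table
 * would bump the per-entry state counter here, and would call
 * pfr_states_decrease() again when the state is unlinked.  "kt",
 * "addr" and "af" are assumed to be the table, address and family the
 * state was created from.
 */
static void
example_state_bind(struct pfr_ktable *kt, struct pf_addr *addr,
    sa_family_t af)
{
	/* -1 means no matching entry or no counter memory available */
	if (pfr_states_increase(kt, addr, af) == -1)
		DPFPRINTF(LOG_DEBUG,
		    "example_state_bind: no counter available");
}
#endif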

/*
 * Added for load balancing to find a kentry outside of the table.
 * We need to construct a temporary pfr_addr struct for the lookup.
 */
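/*
 * The temporary entry describes a host address: pfra_net 32 for
 * AF_INET, 128 for AF_INET6.  With "exact" set, pfr_lookup_addr()
 * asks for an exact match rather than a best (covering) match.
 */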
struct pfr_kentry *
pfr_kentry_byaddr(struct pfr_ktable *kt, struct pf_addr *addr, sa_family_t af,
    int exact)
{
	struct pfr_kentry	*ke;
	struct pfr_addr		 p;

	bzero(&p, sizeof(p));
	p.pfra_af = af;
	switch (af) {
	case AF_INET:
		p.pfra_net = 32;
		p.pfra_ip4addr = addr->v4;
		break;
#ifdef INET6
	case AF_INET6:
		p.pfra_net = 128;
		p.pfra_ip6addr = addr->v6;
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}

	ke = pfr_lookup_addr(kt, &p, exact);
	return (ke);
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;
	int			s;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	s = splsoftnet();
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	switch (dyn->pfid_af) {
	case AF_UNSPEC:	/* look up both IPv4 and IPv6 addresses */
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		break;
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		break;
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		break;
#endif /* INET6 */
	default:
		unhandled_af(dyn->pfid_af);
	}
	splx(s);
}

void
pfr_ktable_winfo_update(struct pfr_ktable *kt, struct pfr_kentry *p)
{
	/*
	 * If the cost flag is set, the table's gcd and maximum weights
	 * are needed for weighted round-robin.
	 */
	if (kt->pfrkt_refcntcost > 0) {
		u_int16_t weight;

		weight = (p->pfrke_type == PFRKE_COST) ?
		    ((struct pfr_kentry_cost *)p)->weight : 1;

		if (kt->pfrkt_gcdweight == 0)
			kt->pfrkt_gcdweight = weight;
		kt->pfrkt_gcdweight = pfr_gcd(weight, kt->pfrkt_gcdweight);
		if (kt->pfrkt_maxweight < weight)
			kt->pfrkt_maxweight = weight;
	}
}
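
/*
 * Worked example of the weighted round-robin bookkeeping: entries of
 * weight 6 and 3 give pfrkt_gcdweight 3 and pfrkt_maxweight 6, so
 * rpool->curweight in pfr_pool_get() steps 6, 3, 6, 3, ...  The
 * selection code (outside this file) compares entry weights against
 * curweight, so the weight-6 entry stays eligible at both steps and
 * is picked twice as often as the weight-3 one.
 */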
  2365. }