  1. /* $OpenBSD: pf_ioctl.c,v 1.289 2015/07/21 02:32:04 sashan Exp $ */
  2. /*
  3. * Copyright (c) 2001 Daniel Hartmeier
  4. * Copyright (c) 2002 - 2013 Henning Brauer <henning@openbsd.org>
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions
  9. * are met:
  10. *
  11. * - Redistributions of source code must retain the above copyright
  12. * notice, this list of conditions and the following disclaimer.
  13. * - Redistributions in binary form must reproduce the above
  14. * copyright notice, this list of conditions and the following
  15. * disclaimer in the documentation and/or other materials provided
  16. * with the distribution.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  21. * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  22. * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  23. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  24. * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  25. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  26. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  27. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  28. * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  29. * POSSIBILITY OF SUCH DAMAGE.
  30. *
  31. * Effort sponsored in part by the Defense Advanced Research Projects
  32. * Agency (DARPA) and Air Force Research Laboratory, Air Force
  33. * Materiel Command, USAF, under agreement number F30602-01-2-0537.
  34. *
  35. */
  36. #include "pfsync.h"
  37. #include "pflog.h"
  38. #include <sys/param.h>
  39. #include <sys/systm.h>
  40. #include <sys/mbuf.h>
  41. #include <sys/filio.h>
  42. #include <sys/fcntl.h>
  43. #include <sys/socket.h>
  44. #include <sys/socketvar.h>
  45. #include <sys/kernel.h>
  46. #include <sys/time.h>
  47. #include <sys/timeout.h>
  48. #include <sys/pool.h>
  49. #include <sys/malloc.h>
  50. #include <sys/kthread.h>
  51. #include <sys/rwlock.h>
  52. #include <sys/syslog.h>
  53. #include <uvm/uvm_extern.h>
  54. #include <net/if.h>
  55. #include <net/if_var.h>
  56. #include <net/if_types.h>
  57. #include <net/route.h>
  58. #include <netinet/in.h>
  59. #include <netinet/ip.h>
  60. #include <netinet/ip_var.h>
  61. #include <netinet/ip_icmp.h>
  62. #include <crypto/md5.h>
  63. #include <net/pfvar.h>
  64. #if NPFSYNC > 0
  65. #include <netinet/ip_ipsp.h>
  66. #include <net/if_pfsync.h>
  67. #endif /* NPFSYNC > 0 */
  68. #ifdef INET6
  69. #include <netinet/ip6.h>
  70. #include <netinet/in_pcb.h>
  71. #endif /* INET6 */
/* Local function prototypes. */
void			 pfattach(int);
void			 pf_thread_create(void *);
int			 pfopen(dev_t, int, int, struct proc *);
int			 pfclose(dev_t, int, int, struct proc *);
int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
int			 pf_begin_rules(u_int32_t *, const char *);
int			 pf_rollback_rules(u_int32_t, char *);
int			 pf_create_queues(void);
int			 pf_commit_queues(void);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, char *);
int			 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
int			 pf_kif_setup(char *, struct pfi_kif **);
void			 pf_addr_copyout(struct pf_addr_wrap *);
void			 pf_trans_set_commit(void);
void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
			    struct pf_ruleset *);
u_int16_t		 pf_qname2qid(char *, int);
void			 pf_qid2qname(u_int16_t, char *);
void			 pf_qid_unref(u_int16_t);

/* The implicit catch-all pass rule, and its staged replacement. */
struct pf_rule		 pf_default_rule, pf_default_rule_new;
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");

/*
 * Staged "set ..." options; applied as one unit by
 * pf_trans_set_commit().  "mask" holds the PF_TSET_* bits of the
 * fields that are pending.
 */
struct {
	char		statusif[IFNAMSIZ];
	u_int32_t	debug;
	u_int32_t	hostid;
	u_int32_t	reass;
	u_int32_t	mask;
} pf_trans_set;

#define	PF_TSET_STATUSIF	0x01
#define	PF_TSET_DEBUG		0x02
#define	PF_TSET_HOSTID		0x04
#define	PF_TSET_REASS		0x08

/* Highest tag/queue id handed out by tagname2tag(). */
#define	TAGID_MAX	 50000

/* Name<->id mappings for packet tags and for queues. */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

/* Both lists share struct pf_tagname, so the name sizes must agree. */
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);
/*
 * One-time initialization of the pf subsystem at driver attach time:
 * backing memory pools, auxiliary subsystems (hfsc, tables, interface
 * tracking, OS fingerprinting, normalization), the implicit default
 * pass rule, the default state timeouts and the global status block.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrule",
	    &pool_allocator_nointr);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0, 0, 0,
	    "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstate",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 0, 0,
	    "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0, 0, 0,
	    "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0, 0, 0,
	    "pfqueue", NULL);
	hfsc_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* lower the table entry limit on machines with <= 100MB of RAM */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	/* two queue lists: one active, one staged for the next commit */
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (u_int32_t)-1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	/* default rule matches any address, no translation/routing */
	pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type = PF_ADDR_NONE;
	pf_default_rule.nat.addr.type = PF_ADDR_NONE;
	pf_default_rule.route.addr.type = PF_ADDR_NONE;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
	kthread_create_deferred(pf_thread_create, NULL);
}
  198. void
  199. pf_thread_create(void *v)
  200. {
  201. if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
  202. panic("pfpurge thread");
  203. }
  204. int
  205. pfopen(dev_t dev, int flags, int fmt, struct proc *p)
  206. {
  207. if (minor(dev) >= 1)
  208. return (ENXIO);
  209. return (0);
  210. }
  211. int
  212. pfclose(dev_t dev, int flags, int fmt, struct proc *p)
  213. {
  214. if (minor(dev) >= 1)
  215. return (ENXIO);
  216. return (0);
  217. }
/*
 * Unlink and/or destroy a rule.  With a non-NULL "rulequeue" the rule
 * is removed from that queue first.  A rule that still has states or
 * source nodes attached (or is still linked somewhere) is kept
 * allocated; only when all of those are gone are its tags, route
 * labels, dynamic addresses, tables, interface references and anchor
 * linkage released and the rule returned to the pool.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* a NULL tqe_prev marks the rule as unlinked */
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	/* still referenced or still linked: defer the final teardown */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;

	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	/*
	 * Called without a queue (final dereference of an already
	 * unlinked rule): the tables were not detached above, do it now.
	 */
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pool_put(&pf_rule_pl, rule);
}
/*
 * Remove "rule" from the active ruleset, renumber the survivors, bump
 * the ticket and recompute the skip steps.  If the ruleset became
 * empty and a parent anchor rule was supplied, remove that anchor
 * rule from its own ruleset the same way.
 */
void
pf_purge_rule(struct pf_ruleset *ruleset, struct pf_rule *rule,
    struct pf_ruleset *aruleset, struct pf_rule *arule)
{
	u_int32_t nr = 0;

	KASSERT(ruleset != NULL && rule != NULL);

	pf_rm_rule(ruleset->rules.active.ptr, rule);
	ruleset->rules.active.rcount--;
	/* renumber the remaining rules consecutively from 0 */
	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
		rule->nr = nr++;
	ruleset->rules.active.ticket++;
	pf_calc_skip_steps(ruleset->rules.active.ptr);

	/* remove the parent anchor rule */
	if (nr == 0 && arule && aruleset) {
		pf_rm_rule(aruleset->rules.active.ptr, arule);
		aruleset->rules.active.rcount--;
		TAILQ_FOREACH(rule, aruleset->rules.active.ptr, entries)
			rule->nr = nr++;
		aruleset->rules.active.ticket++;
		pf_calc_skip_steps(aruleset->rules.active.ptr);
	}
}
/*
 * Return the id for "tagname", taking a new reference.  If the name
 * is unknown and "create" is set, allocate the smallest free id (the
 * list is kept sorted by id, so the first gap is reused).  Returns 0
 * on failure: name not found without "create", id space exhausted,
 * or allocation failure.  Valid ids start at 1.
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* walk while ids are consecutive; stop at the first gap */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_TEMP, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
  328. void
  329. tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
  330. {
  331. struct pf_tagname *tag;
  332. TAILQ_FOREACH(tag, head, entries)
  333. if (tag->tag == tagid) {
  334. strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
  335. return;
  336. }
  337. }
  338. void
  339. tag_unref(struct pf_tags *head, u_int16_t tag)
  340. {
  341. struct pf_tagname *p, *next;
  342. if (tag == 0)
  343. return;
  344. for (p = TAILQ_FIRST(head); p != NULL; p = next) {
  345. next = TAILQ_NEXT(p, entries);
  346. if (tag == p->tag) {
  347. if (--p->ref == 0) {
  348. TAILQ_REMOVE(head, p, entries);
  349. free(p, M_TEMP, 0);
  350. }
  351. break;
  352. }
  353. }
  354. }
/* Look up (or create) the id of packet tag "tagname". */
u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}
/* Copy the name of packet tag "tagid" into "p" (PF_TAG_NAME_SIZE). */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
  365. void
  366. pf_tag_ref(u_int16_t tag)
  367. {
  368. struct pf_tagname *t;
  369. TAILQ_FOREACH(t, &pf_tags, entries)
  370. if (t->tag == tag)
  371. break;
  372. if (t != NULL)
  373. t->ref++;
  374. }
/* Release one reference on packet tag "tag". */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
  380. int
  381. pf_rtlabel_add(struct pf_addr_wrap *a)
  382. {
  383. if (a->type == PF_ADDR_RTLABEL &&
  384. (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
  385. return (-1);
  386. return (0);
  387. }
  388. void
  389. pf_rtlabel_remove(struct pf_addr_wrap *a)
  390. {
  391. if (a->type == PF_ADDR_RTLABEL)
  392. rtlabel_unref(a->v.rtlabel);
  393. }
  394. void
  395. pf_rtlabel_copyout(struct pf_addr_wrap *a)
  396. {
  397. const char *name;
  398. if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
  399. if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
  400. strlcpy(a->v.rtlabelname, "?",
  401. sizeof(a->v.rtlabelname));
  402. else
  403. strlcpy(a->v.rtlabelname, name,
  404. sizeof(a->v.rtlabelname));
  405. }
  406. }
/* Look up (or create) the id of queue "qname". */
u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}
/* Copy the name of queue id "qid" into "p" (PF_QNAME_SIZE bytes). */
void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}
  417. void
  418. pf_qid_unref(u_int16_t qid)
  419. {
  420. tag_unref(&pf_qids, (u_int16_t)qid);
  421. }
/*
 * Start a ruleset transaction for "anchor": flush anything left on
 * the inactive rule list, open the inactive ruleset and return its
 * new ticket via "ticket".  Returns EINVAL if the anchor cannot be
 * found or created.
 */
int
pf_begin_rules(u_int32_t *ticket, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	*ticket = ++rs->rules.inactive.ticket;
	rs->rules.inactive.open = 1;
	return (0);
}
/*
 * Abort a ruleset transaction: if "ticket" still matches the open
 * inactive ruleset of "anchor", flush the staged rules and close it.
 * For the main ruleset the staged queue definitions are discarded as
 * well.  A stale or unknown ticket is silently ignored.
 */
int
pf_rollback_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    rs->rules.inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	rs->rules.inactive.open = 0;

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_free_queues(pf_queues_inactive, NULL));
}
  456. int
  457. pf_free_queues(struct pf_queuehead *where, struct ifnet *ifp)
  458. {
  459. struct pf_queuespec *q, *qtmp;
  460. TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
  461. if (ifp && q->kif->pfik_ifp != ifp)
  462. continue;
  463. TAILQ_REMOVE(where, q, entries);
  464. pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
  465. pool_put(&pf_queue_pl, q);
  466. }
  467. return (0);
  468. }
/*
 * Tear down the active hfsc queues, optionally restricted to "ifp".
 * Queues are deleted in reverse order so children go before their
 * parents; interfaces whose root queue (parent_qid == 0) is gone are
 * then returned to normal queueing.  Returns the first hfsc error, or
 * 0 on success.
 */
int
pf_remove_queues(struct ifnet *ifp)
{
	struct pf_queuespec	*q;
	int			 error = 0;

	/* remove queues */
	TAILQ_FOREACH_REVERSE(q, pf_queues_active, pf_queuehead, entries) {
		if (ifp && q->kif->pfik_ifp != ifp)
			continue;
		if ((error = hfsc_delqueue(q)) != 0)
			return (error);
	}

	/* put back interfaces in normal queueing mode */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (ifp && q->kif->pfik_ifp != ifp)
			continue;
		if (q->parent_qid == 0)
			if ((error = hfsc_detach(q->kif->pfik_ifp)) != 0)
				return (error);
	}

	return (0);
}
  491. int
  492. pf_create_queues(void)
  493. {
  494. struct pf_queuespec *q;
  495. int error = 0;
  496. /* find root queues and attach hfsc to these interfaces */
  497. TAILQ_FOREACH(q, pf_queues_active, entries)
  498. if (q->parent_qid == 0)
  499. if ((error = hfsc_attach(q->kif->pfik_ifp)) != 0)
  500. return (error);
  501. /* and now everything */
  502. TAILQ_FOREACH(q, pf_queues_active, entries)
  503. if ((error = hfsc_addqueue(q)) != 0)
  504. return (error);
  505. return (0);
  506. }
  507. int
  508. pf_commit_queues(void)
  509. {
  510. struct pf_queuehead *qswap;
  511. int error;
  512. if ((error = pf_remove_queues(NULL)) != 0)
  513. return (error);
  514. /* swap */
  515. qswap = pf_queues_active;
  516. pf_queues_active = pf_queues_inactive;
  517. pf_queues_inactive = qswap;
  518. pf_free_queues(pf_queues_inactive, NULL);
  519. return (pf_create_queues());
  520. }
/* Fold a fixed-size rule member into the running MD5 context "ctx". */
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

/* Fold a NUL-terminated string member (terminator excluded). */
#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

/*
 * Fold a 32-bit member in network byte order so the checksum is
 * endian-independent; "stor" is a caller-provided u_int32_t scratch
 * variable.
 */
#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

/* Same as PF_MD5_UPD_HTONL but for a 16-bit member. */
#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
/*
 * Fold one rule address block (address, ports, negation flag, port
 * operator) into the ruleset checksum.  Only the fields meaningful
 * for the configured address type are hashed.  The field order must
 * never change, or pfsync peers would disagree on the checksum.
 */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
/*
 * Fold all checksum-relevant fields of "rule" into "ctx".  Multi-byte
 * integers are hashed in network byte order (via the scratch vars x/y)
 * so the result is identical across architectures.  The field order
 * must never change, or pfsync peers would disagree on the checksum.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	 x;
	u_int32_t	 y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
/*
 * Commit a ruleset transaction: swap the inactive ruleset into place
 * (under splsoftnet, so packet processing never sees a half-swapped
 * state), then purge the previously active rules.  For the main
 * ruleset the pfsync checksum is recomputed first and the staged
 * queues are committed last.  Returns EBUSY on a stale ticket.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;
	old_array = rs->rules.active.ptr_array;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.ptr_array = rs->rules.inactive.ptr_array;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.ptr_array = old_array;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}
  643. int
  644. pf_setup_pfsync_matching(struct pf_ruleset *rs)
  645. {
  646. MD5_CTX ctx;
  647. struct pf_rule *rule;
  648. u_int8_t digest[PF_MD5_DIGEST_LENGTH];
  649. MD5Init(&ctx);
  650. if (rs->rules.inactive.ptr_array)
  651. free(rs->rules.inactive.ptr_array, M_TEMP, 0);
  652. rs->rules.inactive.ptr_array = NULL;
  653. if (rs->rules.inactive.rcount) {
  654. rs->rules.inactive.ptr_array =
  655. mallocarray(rs->rules.inactive.rcount, sizeof(caddr_t),
  656. M_TEMP, M_NOWAIT);
  657. if (!rs->rules.inactive.ptr_array)
  658. return (ENOMEM);
  659. TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
  660. pf_hash_rule(&ctx, rule);
  661. (rs->rules.inactive.ptr_array)[rule->nr] = rule;
  662. }
  663. }
  664. MD5Final(digest, &ctx);
  665. memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
  666. return (0);
  667. }
  668. int
  669. pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
  670. sa_family_t af)
  671. {
  672. if (pfi_dynaddr_setup(addr, af) ||
  673. pf_tbladdr_setup(ruleset, addr) ||
  674. pf_rtlabel_add(addr))
  675. return (EINVAL);
  676. return (0);
  677. }
  678. int
  679. pf_kif_setup(char *ifname, struct pfi_kif **kif)
  680. {
  681. if (ifname[0]) {
  682. *kif = pfi_kif_get(ifname);
  683. if (*kif == NULL)
  684. return (EINVAL);
  685. pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
  686. } else
  687. *kif = NULL;
  688. return (0);
  689. }
/*
 * Convert the kernel-internal representation of an address wrapper into
 * its user-visible form before copying a rule out to userland: dynamic
 * address, table, and route-label references are each translated by
 * their owning subsystem.
 */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}
  697. int
  698. pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
  699. {
  700. int s;
  701. int error = 0;
  702. /* XXX keep in sync with switch() below */
  703. if (securelevel > 1)
  704. switch (cmd) {
  705. case DIOCGETRULES:
  706. case DIOCGETRULE:
  707. case DIOCGETSTATE:
  708. case DIOCSETSTATUSIF:
  709. case DIOCGETSTATUS:
  710. case DIOCCLRSTATUS:
  711. case DIOCNATLOOK:
  712. case DIOCSETDEBUG:
  713. case DIOCGETSTATES:
  714. case DIOCGETTIMEOUT:
  715. case DIOCCLRRULECTRS:
  716. case DIOCGETLIMIT:
  717. case DIOCGETRULESETS:
  718. case DIOCGETRULESET:
  719. case DIOCGETQUEUES:
  720. case DIOCGETQUEUE:
  721. case DIOCGETQSTATS:
  722. case DIOCRGETTABLES:
  723. case DIOCRGETTSTATS:
  724. case DIOCRCLRTSTATS:
  725. case DIOCRCLRADDRS:
  726. case DIOCRADDADDRS:
  727. case DIOCRDELADDRS:
  728. case DIOCRSETADDRS:
  729. case DIOCRGETASTATS:
  730. case DIOCRCLRASTATS:
  731. case DIOCRTSTADDRS:
  732. case DIOCOSFPGET:
  733. case DIOCGETSRCNODES:
  734. case DIOCCLRSRCNODES:
  735. case DIOCIGETIFACES:
  736. case DIOCSETIFFLAG:
  737. case DIOCCLRIFFLAG:
  738. break;
  739. case DIOCRCLRTABLES:
  740. case DIOCRADDTABLES:
  741. case DIOCRDELTABLES:
  742. case DIOCRSETTFLAGS:
  743. if (((struct pfioc_table *)addr)->pfrio_flags &
  744. PFR_FLAG_DUMMY)
  745. break; /* dummy operation ok */
  746. return (EPERM);
  747. default:
  748. return (EPERM);
  749. }
  750. if (!(flags & FWRITE))
  751. switch (cmd) {
  752. case DIOCGETRULES:
  753. case DIOCGETSTATE:
  754. case DIOCGETSTATUS:
  755. case DIOCGETSTATES:
  756. case DIOCGETTIMEOUT:
  757. case DIOCGETLIMIT:
  758. case DIOCGETRULESETS:
  759. case DIOCGETRULESET:
  760. case DIOCGETQUEUES:
  761. case DIOCGETQUEUE:
  762. case DIOCGETQSTATS:
  763. case DIOCNATLOOK:
  764. case DIOCRGETTABLES:
  765. case DIOCRGETTSTATS:
  766. case DIOCRGETADDRS:
  767. case DIOCRGETASTATS:
  768. case DIOCRTSTADDRS:
  769. case DIOCOSFPGET:
  770. case DIOCGETSRCNODES:
  771. case DIOCIGETIFACES:
  772. break;
  773. case DIOCRCLRTABLES:
  774. case DIOCRADDTABLES:
  775. case DIOCRDELTABLES:
  776. case DIOCRCLRTSTATS:
  777. case DIOCRCLRADDRS:
  778. case DIOCRADDADDRS:
  779. case DIOCRDELADDRS:
  780. case DIOCRSETADDRS:
  781. case DIOCRSETTFLAGS:
  782. if (((struct pfioc_table *)addr)->pfrio_flags &
  783. PFR_FLAG_DUMMY) {
  784. flags |= FWRITE; /* need write lock for dummy */
  785. break; /* dummy operation ok */
  786. }
  787. return (EACCES);
  788. case DIOCGETRULE:
  789. if (((struct pfioc_rule *)addr)->action ==
  790. PF_GET_CLR_CNTR)
  791. return (EACCES);
  792. break;
  793. default:
  794. return (EACCES);
  795. }
  796. if (flags & FWRITE)
  797. rw_enter_write(&pf_consistency_lock);
  798. else
  799. rw_enter_read(&pf_consistency_lock);
  800. s = splsoftnet();
  801. switch (cmd) {
  802. case DIOCSTART:
  803. if (pf_status.running)
  804. error = EEXIST;
  805. else {
  806. pf_status.running = 1;
  807. pf_status.since = time_second;
  808. if (pf_status.stateid == 0) {
  809. pf_status.stateid = time_second;
  810. pf_status.stateid = pf_status.stateid << 32;
  811. }
  812. pf_create_queues();
  813. DPFPRINTF(LOG_NOTICE, "pf: started");
  814. }
  815. break;
  816. case DIOCSTOP:
  817. if (!pf_status.running)
  818. error = ENOENT;
  819. else {
  820. pf_status.running = 0;
  821. pf_status.since = time_second;
  822. pf_remove_queues(NULL);
  823. DPFPRINTF(LOG_NOTICE, "pf: stopped");
  824. }
  825. break;
  826. case DIOCGETQUEUES: {
  827. struct pfioc_queue *pq = (struct pfioc_queue *)addr;
  828. struct pf_queuespec *qs;
  829. u_int32_t nr = 0;
  830. pq->ticket = pf_main_ruleset.rules.active.ticket;
  831. /* save state to not run over them all each time? */
  832. qs = TAILQ_FIRST(pf_queues_active);
  833. while (qs != NULL) {
  834. qs = TAILQ_NEXT(qs, entries);
  835. nr++;
  836. }
  837. pq->nr = nr;
  838. break;
  839. }
  840. case DIOCGETQUEUE: {
  841. struct pfioc_queue *pq = (struct pfioc_queue *)addr;
  842. struct pf_queuespec *qs;
  843. u_int32_t nr = 0;
  844. if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
  845. error = EBUSY;
  846. break;
  847. }
  848. /* save state to not run over them all each time? */
  849. qs = TAILQ_FIRST(pf_queues_active);
  850. while ((qs != NULL) && (nr++ < pq->nr))
  851. qs = TAILQ_NEXT(qs, entries);
  852. if (qs == NULL) {
  853. error = EBUSY;
  854. break;
  855. }
  856. bcopy(qs, &pq->queue, sizeof(pq->queue));
  857. break;
  858. }
  859. case DIOCGETQSTATS: {
  860. struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
  861. struct pf_queuespec *qs;
  862. u_int32_t nr;
  863. int nbytes;
  864. if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
  865. error = EBUSY;
  866. break;
  867. }
  868. nbytes = pq->nbytes;
  869. nr = 0;
  870. /* save state to not run over them all each time? */
  871. qs = TAILQ_FIRST(pf_queues_active);
  872. while ((qs != NULL) && (nr++ < pq->nr))
  873. qs = TAILQ_NEXT(qs, entries);
  874. if (qs == NULL) {
  875. error = EBUSY;
  876. break;
  877. }
  878. bcopy(qs, &pq->queue, sizeof(pq->queue));
  879. error = hfsc_qstats(qs, pq->buf, &nbytes);
  880. if (error == 0)
  881. pq->nbytes = nbytes;
  882. break;
  883. }
  884. case DIOCADDQUEUE: {
  885. struct pfioc_queue *q = (struct pfioc_queue *)addr;
  886. struct pf_queuespec *qs;
  887. if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
  888. error = EBUSY;
  889. break;
  890. }
  891. qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
  892. if (qs == NULL) {
  893. error = ENOMEM;
  894. break;
  895. }
  896. bcopy(&q->queue, qs, sizeof(*qs));
  897. qs->qid = pf_qname2qid(qs->qname, 1);
  898. if (qs->parent[0] && (qs->parent_qid =
  899. pf_qname2qid(qs->parent, 0)) == 0) {
  900. pool_put(&pf_queue_pl, qs);
  901. error = ESRCH;
  902. break;
  903. }
  904. qs->kif = pfi_kif_get(qs->ifname);
  905. if (qs->kif == NULL) {
  906. pool_put(&pf_queue_pl, qs);
  907. error = ESRCH;
  908. break;
  909. }
  910. /* XXX resolve bw percentage specs */
  911. pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);
  912. if (qs->qlimit == 0)
  913. qs->qlimit = HFSC_DEFAULT_QLIMIT;
  914. TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);
  915. break;
  916. }
  917. case DIOCADDRULE: {
  918. struct pfioc_rule *pr = (struct pfioc_rule *)addr;
  919. struct pf_ruleset *ruleset;
  920. struct pf_rule *rule, *tail;
  921. pr->anchor[sizeof(pr->anchor) - 1] = 0;
  922. ruleset = pf_find_ruleset(pr->anchor);
  923. if (ruleset == NULL) {
  924. error = EINVAL;
  925. break;
  926. }
  927. if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
  928. error = EINVAL;
  929. break;
  930. }
  931. if (pr->ticket != ruleset->rules.inactive.ticket) {
  932. error = EBUSY;
  933. break;
  934. }
  935. rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
  936. if (rule == NULL) {
  937. error = ENOMEM;
  938. break;
  939. }
  940. if ((error = pf_rule_copyin(&pr->rule, rule, ruleset))) {
  941. pf_rm_rule(NULL, rule);
  942. rule = NULL;
  943. break;
  944. }
  945. rule->cuid = p->p_ucred->cr_ruid;
  946. rule->cpid = p->p_p->ps_pid;
  947. switch (rule->af) {
  948. case 0:
  949. break;
  950. case AF_INET:
  951. break;
  952. #ifdef INET6
  953. case AF_INET6:
  954. break;
  955. #endif /* INET6 */
  956. default:
  957. pf_rm_rule(NULL, rule);
  958. rule = NULL;
  959. error = EAFNOSUPPORT;
  960. goto fail;
  961. }
  962. tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
  963. pf_rulequeue);
  964. if (tail)
  965. rule->nr = tail->nr + 1;
  966. else
  967. rule->nr = 0;
  968. if (rule->src.addr.type == PF_ADDR_NONE ||
  969. rule->dst.addr.type == PF_ADDR_NONE)
  970. error = EINVAL;
  971. if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
  972. error = EINVAL;
  973. if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
  974. error = EINVAL;
  975. if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
  976. error = EINVAL;
  977. if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
  978. error = EINVAL;
  979. if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
  980. error = EINVAL;
  981. if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
  982. error = EINVAL;
  983. if (rule->rt && !rule->direction)
  984. error = EINVAL;
  985. if (rule->scrub_flags & PFSTATE_SETPRIO &&
  986. (rule->set_prio[0] > IFQ_MAXPRIO ||
  987. rule->set_prio[1] > IFQ_MAXPRIO))
  988. error = EINVAL;
  989. if (error) {
  990. pf_rm_rule(NULL, rule);
  991. break;
  992. }
  993. TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
  994. rule, entries);
  995. ruleset->rules.inactive.rcount++;
  996. break;
  997. }
  998. case DIOCGETRULES: {
  999. struct pfioc_rule *pr = (struct pfioc_rule *)addr;
  1000. struct pf_ruleset *ruleset;
  1001. struct pf_rule *tail;
  1002. pr->anchor[sizeof(pr->anchor) - 1] = 0;
  1003. ruleset = pf_find_ruleset(pr->anchor);
  1004. if (ruleset == NULL) {
  1005. error = EINVAL;
  1006. break;
  1007. }
  1008. tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
  1009. if (tail)
  1010. pr->nr = tail->nr + 1;
  1011. else
  1012. pr->nr = 0;
  1013. pr->ticket = ruleset->rules.active.ticket;
  1014. break;
  1015. }
  1016. case DIOCGETRULE: {
  1017. struct pfioc_rule *pr = (struct pfioc_rule *)addr;
  1018. struct pf_ruleset *ruleset;
  1019. struct pf_rule *rule;
  1020. int i;
  1021. pr->anchor[sizeof(pr->anchor) - 1] = 0;
  1022. ruleset = pf_find_ruleset(pr->anchor);
  1023. if (ruleset == NULL) {
  1024. error = EINVAL;
  1025. break;
  1026. }
  1027. if (pr->ticket != ruleset->rules.active.ticket) {
  1028. error = EBUSY;
  1029. break;
  1030. }
  1031. rule = TAILQ_FIRST(ruleset->rules.active.ptr);
  1032. while ((rule != NULL) && (rule->nr != pr->nr))
  1033. rule = TAILQ_NEXT(rule, entries);
  1034. if (rule == NULL) {
  1035. error = EBUSY;
  1036. break;
  1037. }
  1038. bcopy(rule, &pr->rule, sizeof(struct pf_rule));
  1039. bzero(&pr->rule.entries, sizeof(pr->rule.entries));
  1040. pr->rule.kif = NULL;
  1041. pr->rule.nat.kif = NULL;
  1042. pr->rule.rdr.kif = NULL;
  1043. pr->rule.route.kif = NULL;
  1044. pr->rule.rcv_kif = NULL;
  1045. pr->rule.anchor = NULL;
  1046. pr->rule.overload_tbl = NULL;
  1047. if (pf_anchor_copyout(ruleset, rule, pr)) {
  1048. error = EBUSY;
  1049. break;
  1050. }
  1051. pf_addr_copyout(&pr->rule.src.addr);
  1052. pf_addr_copyout(&pr->rule.dst.addr);
  1053. pf_addr_copyout(&pr->rule.rdr.addr);
  1054. pf_addr_copyout(&pr->rule.nat.addr);
  1055. pf_addr_copyout(&pr->rule.route.addr);
  1056. for (i = 0; i < PF_SKIP_COUNT; ++i)
  1057. if (rule->skip[i].ptr == NULL)
  1058. pr->rule.skip[i].nr = (u_int32_t)-1;
  1059. else
  1060. pr->rule.skip[i].nr =
  1061. rule->skip[i].ptr->nr;
  1062. if (pr->action == PF_GET_CLR_CNTR) {
  1063. rule->evaluations = 0;
  1064. rule->packets[0] = rule->packets[1] = 0;
  1065. rule->bytes[0] = rule->bytes[1] = 0;
  1066. rule->states_tot = 0;
  1067. }
  1068. break;
  1069. }
  1070. case DIOCCHANGERULE: {
  1071. struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
  1072. struct pf_ruleset *ruleset;
  1073. struct pf_rule *oldrule = NULL, *newrule = NULL;
  1074. u_int32_t nr = 0;
  1075. if (pcr->action < PF_CHANGE_ADD_HEAD ||
  1076. pcr->action > PF_CHANGE_GET_TICKET) {
  1077. error = EINVAL;
  1078. break;
  1079. }
  1080. ruleset = pf_find_ruleset(pcr->anchor);
  1081. if (ruleset == NULL) {
  1082. error = EINVAL;
  1083. break;
  1084. }
  1085. if (pcr->action == PF_CHANGE_GET_TICKET) {
  1086. pcr->ticket = ++ruleset->rules.active.ticket;
  1087. break;
  1088. } else {
  1089. if (pcr->ticket !=
  1090. ruleset->rules.active.ticket) {
  1091. error = EINVAL;
  1092. break;
  1093. }
  1094. if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
  1095. error = EINVAL;
  1096. break;
  1097. }
  1098. }
  1099. if (pcr->action != PF_CHANGE_REMOVE) {
  1100. newrule = pool_get(&pf_rule_pl,
  1101. PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
  1102. if (newrule == NULL) {
  1103. error = ENOMEM;
  1104. break;
  1105. }
  1106. pf_rule_copyin(&pcr->rule, newrule, ruleset);
  1107. newrule->cuid = p->p_ucred->cr_ruid;
  1108. newrule->cpid = p->p_p->ps_pid;
  1109. switch (newrule->af) {
  1110. case 0:
  1111. break;
  1112. case AF_INET:
  1113. break;
  1114. #ifdef INET6
  1115. case AF_INET6:
  1116. break;
  1117. #endif /* INET6 */
  1118. default:
  1119. pool_put(&pf_rule_pl, newrule);
  1120. error = EAFNOSUPPORT;
  1121. goto fail;
  1122. }
  1123. if (newrule->rt && !newrule->direction)
  1124. error = EINVAL;
  1125. if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
  1126. error = EINVAL;
  1127. if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
  1128. error = EINVAL;
  1129. if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af))
  1130. error = EINVAL;
  1131. if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af))
  1132. error = EINVAL;
  1133. if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af))
  1134. error = EINVAL;
  1135. if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
  1136. error = EINVAL;
  1137. if (error) {
  1138. pf_rm_rule(NULL, newrule);
  1139. break;
  1140. }
  1141. }
  1142. if (pcr->action == PF_CHANGE_ADD_HEAD)
  1143. oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
  1144. else if (pcr->action == PF_CHANGE_ADD_TAIL)
  1145. oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
  1146. pf_rulequeue);
  1147. else {
  1148. oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
  1149. while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
  1150. oldrule = TAILQ_NEXT(oldrule, entries);
  1151. if (oldrule == NULL) {
  1152. if (newrule != NULL)
  1153. pf_rm_rule(NULL, newrule);
  1154. error = EINVAL;
  1155. break;
  1156. }
  1157. }
  1158. if (pcr->action == PF_CHANGE_REMOVE) {
  1159. pf_rm_rule(ruleset->rules.active.ptr, oldrule);
  1160. ruleset->rules.active.rcount--;
  1161. } else {
  1162. if (oldrule == NULL)
  1163. TAILQ_INSERT_TAIL(
  1164. ruleset->rules.active.ptr,
  1165. newrule, entries);
  1166. else if (pcr->action == PF_CHANGE_ADD_HEAD ||
  1167. pcr->action == PF_CHANGE_ADD_BEFORE)
  1168. TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
  1169. else
  1170. TAILQ_INSERT_AFTER(
  1171. ruleset->rules.active.ptr,
  1172. oldrule, newrule, entries);
  1173. ruleset->rules.active.rcount++;
  1174. }
  1175. nr = 0;
  1176. TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
  1177. oldrule->nr = nr++;
  1178. ruleset->rules.active.ticket++;
  1179. pf_calc_skip_steps(ruleset->rules.active.ptr);
  1180. pf_remove_if_empty_ruleset(ruleset);
  1181. break;
  1182. }
  1183. case DIOCCLRSTATES: {
  1184. struct pf_state *s, *nexts;
  1185. struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
  1186. u_int killed = 0;
  1187. for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
  1188. nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
  1189. if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
  1190. s->kif->pfik_name)) {
  1191. #if NPFSYNC > 0
  1192. /* don't send out individual delete messages */
  1193. SET(s->state_flags, PFSTATE_NOSYNC);
  1194. #endif /* NPFSYNC > 0 */
  1195. pf_unlink_state(s);
  1196. killed++;
  1197. }
  1198. }
  1199. psk->psk_killed = killed;
  1200. #if NPFSYNC > 0
  1201. pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
  1202. #endif /* NPFSYNC > 0 */
  1203. break;
  1204. }
  1205. case DIOCKILLSTATES: {
  1206. struct pf_state *s, *nexts;
  1207. struct pf_state_key *sk;
  1208. struct pf_addr *srcaddr, *dstaddr;
  1209. u_int16_t srcport, dstport;
  1210. struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
  1211. u_int killed = 0;
  1212. if (psk->psk_pfcmp.id) {
  1213. if (psk->psk_pfcmp.creatorid == 0)
  1214. psk->psk_pfcmp.creatorid = pf_status.hostid;
  1215. if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
  1216. pf_unlink_state(s);
  1217. psk->psk_killed = 1;
  1218. }
  1219. break;
  1220. }
  1221. for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
  1222. s = nexts) {
  1223. nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
  1224. if (s->direction == PF_OUT) {
  1225. sk = s->key[PF_SK_STACK];
  1226. srcaddr = &sk->addr[1];
  1227. dstaddr = &sk->addr[0];
  1228. srcport = sk->port[1];
  1229. dstport = sk->port[0];
  1230. } else {
  1231. sk = s->key[PF_SK_WIRE];
  1232. srcaddr = &sk->addr[0];
  1233. dstaddr = &sk->addr[1];
  1234. srcport = sk->port[0];
  1235. dstport = sk->port[1];
  1236. }
  1237. if ((!psk->psk_af || sk->af == psk->psk_af)
  1238. && (!psk->psk_proto || psk->psk_proto ==
  1239. sk->proto) && psk->psk_rdomain == sk->rdomain &&
  1240. PF_MATCHA(psk->psk_src.neg,
  1241. &psk->psk_src.addr.v.a.addr,
  1242. &psk->psk_src.addr.v.a.mask,
  1243. srcaddr, sk->af) &&
  1244. PF_MATCHA(psk->psk_dst.neg,
  1245. &psk->psk_dst.addr.v.a.addr,
  1246. &psk->psk_dst.addr.v.a.mask,
  1247. dstaddr, sk->af) &&
  1248. (psk->psk_src.port_op == 0 ||
  1249. pf_match_port(psk->psk_src.port_op,
  1250. psk->psk_src.port[0], psk->psk_src.port[1],
  1251. srcport)) &&
  1252. (psk->psk_dst.port_op == 0 ||
  1253. pf_match_port(psk->psk_dst.port_op,
  1254. psk->psk_dst.port[0], psk->psk_dst.port[1],
  1255. dstport)) &&
  1256. (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
  1257. !strcmp(psk->psk_label, s->rule.ptr->label))) &&
  1258. (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
  1259. s->kif->pfik_name))) {
  1260. pf_unlink_state(s);
  1261. killed++;
  1262. }
  1263. }
  1264. psk->psk_killed = killed;
  1265. break;
  1266. }
  1267. #if NPFSYNC > 0
  1268. case DIOCADDSTATE: {
  1269. struct pfioc_state *ps = (struct pfioc_state *)addr;
  1270. struct pfsync_state *sp = &ps->state;
  1271. if (sp->timeout >= PFTM_MAX) {
  1272. error = EINVAL;
  1273. break;
  1274. }
  1275. error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
  1276. break;
  1277. }
  1278. #endif /* NPFSYNC > 0 */
  1279. case DIOCGETSTATE: {
  1280. struct pfioc_state *ps = (struct pfioc_state *)addr;
  1281. struct pf_state *s;
  1282. struct pf_state_cmp id_key;
  1283. bzero(&id_key, sizeof(id_key));
  1284. id_key.id = ps->state.id;
  1285. id_key.creatorid = ps->state.creatorid;
  1286. s = pf_find_state_byid(&id_key);
  1287. if (s == NULL) {
  1288. error = ENOENT;
  1289. break;
  1290. }
  1291. pf_state_export(&ps->state, s);
  1292. break;
  1293. }
  1294. case DIOCGETSTATES: {
  1295. struct pfioc_states *ps = (struct pfioc_states *)addr;
  1296. struct pf_state *state;
  1297. struct pfsync_state *p, *pstore;
  1298. u_int32_t nr = 0;
  1299. if (ps->ps_len == 0) {
  1300. nr = pf_status.states;
  1301. ps->ps_len = sizeof(struct pfsync_state) * nr;
  1302. break;
  1303. }
  1304. pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
  1305. p = ps->ps_states;
  1306. state = TAILQ_FIRST(&state_list);
  1307. while (state) {
  1308. if (state->timeout != PFTM_UNLINKED) {
  1309. if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
  1310. break;
  1311. pf_state_export(pstore, state);
  1312. error = copyout(pstore, p, sizeof(*p));
  1313. if (error) {
  1314. free(pstore, M_TEMP, 0);
  1315. goto fail;
  1316. }
  1317. p++;
  1318. nr++;
  1319. }
  1320. state = TAILQ_NEXT(state, entry_list);
  1321. }
  1322. ps->ps_len = sizeof(struct pfsync_state) * nr;
  1323. free(pstore, M_TEMP, 0);
  1324. break;
  1325. }
  1326. case DIOCGETSTATUS: {
  1327. struct pf_status *s = (struct pf_status *)addr;
  1328. bcopy(&pf_status, s, sizeof(struct pf_status));
  1329. pfi_update_status(s->ifname, s);
  1330. break;
  1331. }
  1332. case DIOCSETSTATUSIF: {
  1333. struct pfioc_iface *pi = (struct pfioc_iface *)addr;
  1334. if (pi->pfiio_name[0] == 0) {
  1335. bzero(pf_status.ifname, IFNAMSIZ);
  1336. break;
  1337. }
  1338. strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
  1339. pf_trans_set.mask |= PF_TSET_STATUSIF;
  1340. break;
  1341. }
  1342. case DIOCCLRSTATUS: {
  1343. struct pfioc_iface *pi = (struct pfioc_iface *)addr;
  1344. /* if ifname is specified, clear counters there only */
  1345. if (pi->pfiio_name[0]) {
  1346. pfi_update_status(pi->pfiio_name, NULL);
  1347. break;
  1348. }
  1349. bzero(pf_status.counters, sizeof(pf_status.counters));
  1350. bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
  1351. bzero(pf_status.scounters, sizeof(pf_status.scounters));
  1352. pf_status.since = time_second;
  1353. break;
  1354. }
  1355. case DIOCNATLOOK: {
  1356. struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
  1357. struct pf_state_key *sk;
  1358. struct pf_state *state;
  1359. struct pf_state_key_cmp key;
  1360. int m = 0, direction = pnl->direction;
  1361. int sidx, didx;
  1362. /* NATLOOK src and dst are reversed, so reverse sidx/didx */
  1363. sidx = (direction == PF_IN) ? 1 : 0;
  1364. didx = (direction == PF_IN) ? 0 : 1;
  1365. if (!pnl->proto ||
  1366. PF_AZERO(&pnl->saddr, pnl->af) ||
  1367. PF_AZERO(&pnl->daddr, pnl->af) ||
  1368. ((pnl->proto == IPPROTO_TCP ||
  1369. pnl->proto == IPPROTO_UDP) &&
  1370. (!pnl->dport || !pnl->sport)) ||
  1371. pnl->rdomain > RT_TABLEID_MAX)
  1372. error = EINVAL;
  1373. else {
  1374. key.af = pnl->af;
  1375. key.proto = pnl->proto;
  1376. key.rdomain = pnl->rdomain;
  1377. PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
  1378. key.port[sidx] = pnl->sport;
  1379. PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
  1380. key.port[didx] = pnl->dport;
  1381. state = pf_find_state_all(&key, direction, &m);
  1382. if (m > 1)
  1383. error = E2BIG; /* more than one state */
  1384. else if (state != NULL) {
  1385. sk = state->key[sidx];
  1386. PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
  1387. pnl->rsport = sk->port[sidx];
  1388. PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
  1389. pnl->rdport = sk->port[didx];
  1390. pnl->rrdomain = sk->rdomain;
  1391. } else
  1392. error = ENOENT;
  1393. }
  1394. break;
  1395. }
  1396. case DIOCSETTIMEOUT: {
  1397. struct pfioc_tm *pt = (struct pfioc_tm *)addr;
  1398. if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
  1399. pt->seconds < 0) {
  1400. error = EINVAL;
  1401. goto fail;
  1402. }
  1403. if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
  1404. pt->seconds = 1;
  1405. pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
  1406. pt->seconds = pf_default_rule.timeout[pt->timeout];
  1407. break;
  1408. }
  1409. case DIOCGETTIMEOUT: {
  1410. struct pfioc_tm *pt = (struct pfioc_tm *)addr;
  1411. if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
  1412. error = EINVAL;
  1413. goto fail;
  1414. }
  1415. pt->seconds = pf_default_rule.timeout[pt->timeout];
  1416. break;
  1417. }
  1418. case DIOCGETLIMIT: {
  1419. struct pfioc_limit *pl = (struct pfioc_limit *)addr;
  1420. if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
  1421. error = EINVAL;
  1422. goto fail;
  1423. }
  1424. pl->limit = pf_pool_limits[pl->index].limit;
  1425. break;
  1426. }
  1427. case DIOCSETLIMIT: {
  1428. struct pfioc_limit *pl = (struct pfioc_limit *)addr;
  1429. if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
  1430. pf_pool_limits[pl->index].pp == NULL) {
  1431. error = EINVAL;
  1432. goto fail;
  1433. }
  1434. if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
  1435. pl->limit) {
  1436. error = EBUSY;
  1437. goto fail;
  1438. }
  1439. /* Fragments reference mbuf clusters. */
  1440. if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
  1441. error = EINVAL;
  1442. goto fail;
  1443. }
  1444. pf_pool_limits[pl->index].limit_new = pl->limit;
  1445. pl->limit = pf_pool_limits[pl->index].limit;
  1446. break;
  1447. }
  1448. case DIOCSETDEBUG: {
  1449. u_int32_t *level = (u_int32_t *)addr;
  1450. pf_trans_set.debug = *level;
  1451. pf_trans_set.mask |= PF_TSET_DEBUG;
  1452. break;
  1453. }
  1454. case DIOCCLRRULECTRS: {
  1455. /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
  1456. struct pf_ruleset *ruleset = &pf_main_ruleset;
  1457. struct pf_rule *rule;
  1458. TAILQ_FOREACH(rule,
  1459. ruleset->rules.active.ptr, entries) {
  1460. rule->evaluations = 0;
  1461. rule->packets[0] = rule->packets[1] = 0;
  1462. rule->bytes[0] = rule->bytes[1] = 0;
  1463. }
  1464. break;
  1465. }
  1466. case DIOCGETRULESETS: {
  1467. struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
  1468. struct pf_ruleset *ruleset;
  1469. struct pf_anchor *anchor;
  1470. pr->path[sizeof(pr->path) - 1] = 0;
  1471. if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
  1472. error = EINVAL;
  1473. break;
  1474. }
  1475. pr->nr = 0;
  1476. if (ruleset->anchor == NULL) {
  1477. /* XXX kludge for pf_main_ruleset */
  1478. RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
  1479. if (anchor->parent == NULL)
  1480. pr->nr++;
  1481. } else {
  1482. RB_FOREACH(anchor, pf_anchor_node,
  1483. &ruleset->anchor->children)
  1484. pr->nr++;
  1485. }
  1486. break;
  1487. }
  1488. case DIOCGETRULESET: {
  1489. struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
  1490. struct pf_ruleset *ruleset;
  1491. struct pf_anchor *anchor;
  1492. u_int32_t nr = 0;
  1493. pr->path[sizeof(pr->path) - 1] = 0;
  1494. if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
  1495. error = EINVAL;
  1496. break;
  1497. }
  1498. pr->name[0] = 0;
  1499. if (ruleset->anchor == NULL) {
  1500. /* XXX kludge for pf_main_ruleset */
  1501. RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
  1502. if (anchor->parent == NULL && nr++ == pr->nr) {
  1503. strlcpy(pr->name, anchor->name,
  1504. sizeof(pr->name));
  1505. break;
  1506. }
  1507. } else {
  1508. RB_FOREACH(anchor, pf_anchor_node,
  1509. &ruleset->anchor->children)
  1510. if (nr++ == pr->nr) {
  1511. strlcpy(pr->name, anchor->name,
  1512. sizeof(pr->name));
  1513. break;
  1514. }
  1515. }
  1516. if (!pr->name[0])
  1517. error = EBUSY;
  1518. break;
  1519. }
  1520. case DIOCRCLRTABLES: {
  1521. struct pfioc_table *io = (struct pfioc_table *)addr;
  1522. if (io->pfrio_esize != 0) {
  1523. error = ENODEV;
  1524. break;
  1525. }
  1526. error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
  1527. io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1528. break;
  1529. }
  1530. case DIOCRADDTABLES: {
  1531. struct pfioc_table *io = (struct pfioc_table *)addr;
  1532. if (io->pfrio_esize != sizeof(struct pfr_table)) {
  1533. error = ENODEV;
  1534. break;
  1535. }
  1536. error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
  1537. &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1538. break;
  1539. }
  1540. case DIOCRDELTABLES: {
  1541. struct pfioc_table *io = (struct pfioc_table *)addr;
  1542. if (io->pfrio_esize != sizeof(struct pfr_table)) {
  1543. error = ENODEV;
  1544. break;
  1545. }
  1546. error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
  1547. &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1548. break;
  1549. }
  1550. case DIOCRGETTABLES: {
  1551. struct pfioc_table *io = (struct pfioc_table *)addr;
  1552. if (io->pfrio_esize != sizeof(struct pfr_table)) {
  1553. error = ENODEV;
  1554. break;
  1555. }
  1556. error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
  1557. &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1558. break;
  1559. }
  1560. case DIOCRGETTSTATS: {
  1561. struct pfioc_table *io = (struct pfioc_table *)addr;
  1562. if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
  1563. error = ENODEV;
  1564. break;
  1565. }
  1566. error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
  1567. &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1568. break;
  1569. }
  1570. case DIOCRCLRTSTATS: {
  1571. struct pfioc_table *io = (struct pfioc_table *)addr;
  1572. if (io->pfrio_esize != sizeof(struct pfr_table)) {
  1573. error = ENODEV;
  1574. break;
  1575. }
  1576. error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
  1577. &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1578. break;
  1579. }
  1580. case DIOCRSETTFLAGS: {
  1581. struct pfioc_table *io = (struct pfioc_table *)addr;
  1582. if (io->pfrio_esize != sizeof(struct pfr_table)) {
  1583. error = ENODEV;
  1584. break;
  1585. }
  1586. error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
  1587. io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
  1588. &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1589. break;
  1590. }
  1591. case DIOCRCLRADDRS: {
  1592. struct pfioc_table *io = (struct pfioc_table *)addr;
  1593. if (io->pfrio_esize != 0) {
  1594. error = ENODEV;
  1595. break;
  1596. }
  1597. error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
  1598. io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1599. break;
  1600. }
  1601. case DIOCRADDADDRS: {
  1602. struct pfioc_table *io = (struct pfioc_table *)addr;
  1603. if (io->pfrio_esize != sizeof(struct pfr_addr)) {
  1604. error = ENODEV;
  1605. break;
  1606. }
  1607. error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
  1608. io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
  1609. PFR_FLAG_USERIOCTL);
  1610. break;
  1611. }
  1612. case DIOCRDELADDRS: {
  1613. struct pfioc_table *io = (struct pfioc_table *)addr;
  1614. if (io->pfrio_esize != sizeof(struct pfr_addr)) {
  1615. error = ENODEV;
  1616. break;
  1617. }
  1618. error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
  1619. io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
  1620. PFR_FLAG_USERIOCTL);
  1621. break;
  1622. }
  1623. case DIOCRSETADDRS: {
  1624. struct pfioc_table *io = (struct pfioc_table *)addr;
  1625. if (io->pfrio_esize != sizeof(struct pfr_addr)) {
  1626. error = ENODEV;
  1627. break;
  1628. }
  1629. error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
  1630. io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
  1631. &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
  1632. PFR_FLAG_USERIOCTL, 0);
  1633. break;
  1634. }
  1635. case DIOCRGETADDRS: {
  1636. struct pfioc_table *io = (struct pfioc_table *)addr;
  1637. if (io->pfrio_esize != sizeof(struct pfr_addr)) {
  1638. error = ENODEV;
  1639. break;
  1640. }
  1641. error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
  1642. &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1643. break;
  1644. }
  1645. case DIOCRGETASTATS: {
  1646. struct pfioc_table *io = (struct pfioc_table *)addr;
  1647. if (io->pfrio_esize != sizeof(struct pfr_astats)) {
  1648. error = ENODEV;
  1649. break;
  1650. }
  1651. error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
  1652. &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1653. break;
  1654. }
  1655. case DIOCRCLRASTATS: {
  1656. struct pfioc_table *io = (struct pfioc_table *)addr;
  1657. if (io->pfrio_esize != sizeof(struct pfr_addr)) {
  1658. error = ENODEV;
  1659. break;
  1660. }
  1661. error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
  1662. io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
  1663. PFR_FLAG_USERIOCTL);
  1664. break;
  1665. }
  1666. case DIOCRTSTADDRS: {
  1667. struct pfioc_table *io = (struct pfioc_table *)addr;
  1668. if (io->pfrio_esize != sizeof(struct pfr_addr)) {
  1669. error = ENODEV;
  1670. break;
  1671. }
  1672. error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
  1673. io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
  1674. PFR_FLAG_USERIOCTL);
  1675. break;
  1676. }
  1677. case DIOCRINADEFINE: {
  1678. struct pfioc_table *io = (struct pfioc_table *)addr;
  1679. if (io->pfrio_esize != sizeof(struct pfr_addr)) {
  1680. error = ENODEV;
  1681. break;
  1682. }
  1683. error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
  1684. io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
  1685. io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
  1686. break;
  1687. }
  1688. case DIOCOSFPADD: {
  1689. struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
  1690. error = pf_osfp_add(io);
  1691. break;
  1692. }
  1693. case DIOCOSFPGET: {
  1694. struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
  1695. error = pf_osfp_get(io);
  1696. break;
  1697. }
  1698. case DIOCXBEGIN: {
  1699. struct pfioc_trans *io = (struct pfioc_trans *)addr;
  1700. struct pfioc_trans_e *ioe;
  1701. struct pfr_table *table;
  1702. int i;
  1703. if (io->esize != sizeof(*ioe)) {
  1704. error = ENODEV;
  1705. goto fail;
  1706. }
  1707. ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
  1708. table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
  1709. pf_default_rule_new = pf_default_rule;
  1710. bzero(&pf_trans_set, sizeof(pf_trans_set));
  1711. for (i = 0; i < io->size; i++) {
  1712. if (copyin(io->array+i, ioe, sizeof(*ioe))) {
  1713. free(table, M_TEMP, 0);
  1714. free(ioe, M_TEMP, 0);
  1715. error = EFAULT;
  1716. goto fail;
  1717. }
  1718. switch (ioe->type) {
  1719. case PF_TRANS_TABLE:
  1720. bzero(table, sizeof(*table));
  1721. strlcpy(table->pfrt_anchor, ioe->anchor,
  1722. sizeof(table->pfrt_anchor));
  1723. if ((error = pfr_ina_begin(table,
  1724. &ioe->ticket, NULL, 0))) {
  1725. free(table, M_TEMP, 0);
  1726. free(ioe, M_TEMP, 0);
  1727. goto fail;
  1728. }
  1729. break;
  1730. default:
  1731. if ((error = pf_begin_rules(&ioe->ticket,
  1732. ioe->anchor))) {
  1733. free(table, M_TEMP, 0);
  1734. free(ioe, M_TEMP, 0);
  1735. goto fail;
  1736. }
  1737. break;
  1738. }
  1739. if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
  1740. free(table, M_TEMP, 0);
  1741. free(ioe, M_TEMP, 0);
  1742. error = EFAULT;
  1743. goto fail;
  1744. }
  1745. }
  1746. free(table, M_TEMP, 0);
  1747. free(ioe, M_TEMP, 0);
  1748. break;
  1749. }
  1750. case DIOCXROLLBACK: {
  1751. struct pfioc_trans *io = (struct pfioc_trans *)addr;
  1752. struct pfioc_trans_e *ioe;
  1753. struct pfr_table *table;
  1754. int i;
  1755. if (io->esize != sizeof(*ioe)) {
  1756. error = ENODEV;
  1757. goto fail;
  1758. }
  1759. ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
  1760. table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
  1761. for (i = 0; i < io->size; i++) {
  1762. if (copyin(io->array+i, ioe, sizeof(*ioe))) {
  1763. free(table, M_TEMP, 0);
  1764. free(ioe, M_TEMP, 0);
  1765. error = EFAULT;
  1766. goto fail;
  1767. }
  1768. switch (ioe->type) {
  1769. case PF_TRANS_TABLE:
  1770. bzero(table, sizeof(*table));
  1771. strlcpy(table->pfrt_anchor, ioe->anchor,
  1772. sizeof(table->pfrt_anchor));
  1773. if ((error = pfr_ina_rollback(table,
  1774. ioe->ticket, NULL, 0))) {
  1775. free(table, M_TEMP, 0);
  1776. free(ioe, M_TEMP, 0);
  1777. goto fail; /* really bad */
  1778. }
  1779. break;
  1780. default:
  1781. if ((error = pf_rollback_rules(ioe->ticket,
  1782. ioe->anchor))) {
  1783. free(table, M_TEMP, 0);
  1784. free(ioe, M_TEMP, 0);
  1785. goto fail; /* really bad */
  1786. }
  1787. break;
  1788. }
  1789. }
  1790. free(table, M_TEMP, 0);
  1791. free(ioe, M_TEMP, 0);
  1792. break;
  1793. }
  1794. case DIOCXCOMMIT: {
  1795. struct pfioc_trans *io = (struct pfioc_trans *)addr;
  1796. struct pfioc_trans_e *ioe;
  1797. struct pfr_table *table;
  1798. struct pf_ruleset *rs;
  1799. int i;
  1800. if (io->esize != sizeof(*ioe)) {
  1801. error = ENODEV;
  1802. goto fail;
  1803. }
  1804. ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
  1805. table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
  1806. /* first makes sure everything will succeed */
  1807. for (i = 0; i < io->size; i++) {
  1808. if (copyin(io->array+i, ioe, sizeof(*ioe))) {
  1809. free(table, M_TEMP, 0);
  1810. free(ioe, M_TEMP, 0);
  1811. error = EFAULT;
  1812. goto fail;
  1813. }
  1814. switch (ioe->type) {
  1815. case PF_TRANS_TABLE:
  1816. rs = pf_find_ruleset(ioe->anchor);
  1817. if (rs == NULL || !rs->topen || ioe->ticket !=
  1818. rs->tticket) {
  1819. free(table, M_TEMP, 0);
  1820. free(ioe, M_TEMP, 0);
  1821. error = EBUSY;
  1822. goto fail;
  1823. }
  1824. break;
  1825. default:
  1826. rs = pf_find_ruleset(ioe->anchor);
  1827. if (rs == NULL ||
  1828. !rs->rules.inactive.open ||
  1829. rs->rules.inactive.ticket !=
  1830. ioe->ticket) {
  1831. free(table, M_TEMP, 0);
  1832. free(ioe, M_TEMP, 0);
  1833. error = EBUSY;
  1834. goto fail;
  1835. }
  1836. break;
  1837. }
  1838. }
  1839. /*
  1840. * Checked already in DIOCSETLIMIT, but check again as the
  1841. * situation might have changed.
  1842. */
  1843. for (i = 0; i < PF_LIMIT_MAX; i++) {
  1844. if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
  1845. pf_pool_limits[i].limit_new) {
  1846. free(table, M_TEMP, 0);
  1847. free(ioe, M_TEMP, 0);
  1848. error = EBUSY;
  1849. goto fail;
  1850. }
  1851. }
  1852. /* now do the commit - no errors should happen here */
  1853. for (i = 0; i < io->size; i++) {
  1854. if (copyin(io->array+i, ioe, sizeof(*ioe))) {
  1855. free(table, M_TEMP, 0);
  1856. free(ioe, M_TEMP, 0);
  1857. error = EFAULT;
  1858. goto fail;
  1859. }
  1860. switch (ioe->type) {
  1861. case PF_TRANS_TABLE:
  1862. bzero(table, sizeof(*table));
  1863. strlcpy(table->pfrt_anchor, ioe->anchor,
  1864. sizeof(table->pfrt_anchor));
  1865. if ((error = pfr_ina_commit(table, ioe->ticket,
  1866. NULL, NULL, 0))) {
  1867. free(table, M_TEMP, 0);
  1868. free(ioe, M_TEMP, 0);
  1869. goto fail; /* really bad */
  1870. }
  1871. break;
  1872. default:
  1873. if ((error = pf_commit_rules(ioe->ticket,
  1874. ioe->anchor))) {
  1875. free(table, M_TEMP, 0);
  1876. free(ioe, M_TEMP, 0);
  1877. goto fail; /* really bad */
  1878. }
  1879. break;
  1880. }
  1881. }
  1882. for (i = 0; i < PF_LIMIT_MAX; i++) {
  1883. if (pf_pool_limits[i].limit_new !=
  1884. pf_pool_limits[i].limit &&
  1885. pool_sethardlimit(pf_pool_limits[i].pp,
  1886. pf_pool_limits[i].limit_new, NULL, 0) != 0) {
  1887. free(table, M_TEMP, 0);
  1888. free(ioe, M_TEMP, 0);
  1889. error = EBUSY;
  1890. goto fail; /* really bad */
  1891. }
  1892. pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
  1893. }
  1894. for (i = 0; i < PFTM_MAX; i++) {
  1895. int old = pf_default_rule.timeout[i];
  1896. pf_default_rule.timeout[i] =
  1897. pf_default_rule_new.timeout[i];
  1898. if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
  1899. pf_default_rule.timeout[i] < old)
  1900. wakeup(pf_purge_thread);
  1901. }
  1902. pfi_xcommit();
  1903. pf_trans_set_commit();
  1904. free(table, M_TEMP, 0);
  1905. free(ioe, M_TEMP, 0);
  1906. break;
  1907. }
  1908. case DIOCGETSRCNODES: {
  1909. struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
  1910. struct pf_src_node *n, *p, *pstore;
  1911. u_int32_t nr = 0;
  1912. int space = psn->psn_len;
  1913. if (space == 0) {
  1914. RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
  1915. nr++;
  1916. psn->psn_len = sizeof(struct pf_src_node) * nr;
  1917. break;
  1918. }
  1919. pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
  1920. p = psn->psn_src_nodes;
  1921. RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
  1922. int secs = time_uptime, diff;
  1923. if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
  1924. break;
  1925. bcopy(n, pstore, sizeof(*pstore));
  1926. bzero(&pstore->entry, sizeof(pstore->entry));
  1927. pstore->rule.ptr = NULL;
  1928. pstore->kif = NULL;
  1929. if (n->rule.ptr != NULL)
  1930. pstore->rule.nr = n->rule.ptr->nr;
  1931. pstore->creation = secs - pstore->creation;
  1932. if (pstore->expire > secs)
  1933. pstore->expire -= secs;
  1934. else
  1935. pstore->expire = 0;
  1936. /* adjust the connection rate estimate */
  1937. diff = secs - n->conn_rate.last;
  1938. if (diff >= n->conn_rate.seconds)
  1939. pstore->conn_rate.count = 0;
  1940. else
  1941. pstore->conn_rate.count -=
  1942. n->conn_rate.count * diff /
  1943. n->conn_rate.seconds;
  1944. error = copyout(pstore, p, sizeof(*p));
  1945. if (error) {
  1946. free(pstore, M_TEMP, 0);
  1947. goto fail;
  1948. }
  1949. p++;
  1950. nr++;
  1951. }
  1952. psn->psn_len = sizeof(struct pf_src_node) * nr;
  1953. free(pstore, M_TEMP, 0);
  1954. break;
  1955. }
  1956. case DIOCCLRSRCNODES: {
  1957. struct pf_src_node *n;
  1958. struct pf_state *state;
  1959. RB_FOREACH(state, pf_state_tree_id, &tree_id)
  1960. pf_src_tree_remove_state(state);
  1961. RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
  1962. n->expire = 1;
  1963. pf_purge_expired_src_nodes(1);
  1964. break;
  1965. }
  1966. case DIOCKILLSRCNODES: {
  1967. struct pf_src_node *sn;
  1968. struct pf_state *s;
  1969. struct pfioc_src_node_kill *psnk =
  1970. (struct pfioc_src_node_kill *)addr;
  1971. u_int killed = 0;
  1972. RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
  1973. if (PF_MATCHA(psnk->psnk_src.neg,
  1974. &psnk->psnk_src.addr.v.a.addr,
  1975. &psnk->psnk_src.addr.v.a.mask,
  1976. &sn->addr, sn->af) &&
  1977. PF_MATCHA(psnk->psnk_dst.neg,
  1978. &psnk->psnk_dst.addr.v.a.addr,
  1979. &psnk->psnk_dst.addr.v.a.mask,
  1980. &sn->raddr, sn->af)) {
  1981. /* Handle state to src_node linkage */
  1982. if (sn->states != 0)
  1983. RB_FOREACH(s, pf_state_tree_id,
  1984. &tree_id)
  1985. pf_state_rm_src_node(s, sn);
  1986. sn->expire = 1;
  1987. killed++;
  1988. }
  1989. }
  1990. if (killed > 0)
  1991. pf_purge_expired_src_nodes(1);
  1992. psnk->psnk_killed = killed;
  1993. break;
  1994. }
  1995. case DIOCSETHOSTID: {
  1996. u_int32_t *hostid = (u_int32_t *)addr;
  1997. if (*hostid == 0)
  1998. pf_trans_set.hostid = arc4random();
  1999. else
  2000. pf_trans_set.hostid = *hostid;
  2001. pf_trans_set.mask |= PF_TSET_HOSTID;
  2002. break;
  2003. }
  2004. case DIOCOSFPFLUSH:
  2005. pf_osfp_flush();
  2006. break;
  2007. case DIOCIGETIFACES: {
  2008. struct pfioc_iface *io = (struct pfioc_iface *)addr;
  2009. if (io->pfiio_esize != sizeof(struct pfi_kif)) {
  2010. error = ENODEV;
  2011. break;
  2012. }
  2013. error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
  2014. &io->pfiio_size);
  2015. break;
  2016. }
  2017. case DIOCSETIFFLAG: {
  2018. struct pfioc_iface *io = (struct pfioc_iface *)addr;
  2019. error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
  2020. break;
  2021. }
  2022. case DIOCCLRIFFLAG: {
  2023. struct pfioc_iface *io = (struct pfioc_iface *)addr;
  2024. error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
  2025. break;
  2026. }
  2027. case DIOCSETREASS: {
  2028. u_int32_t *reass = (u_int32_t *)addr;
  2029. pf_trans_set.reass = *reass;
  2030. pf_trans_set.mask |= PF_TSET_REASS;
  2031. break;
  2032. }
  2033. default:
  2034. error = ENODEV;
  2035. break;
  2036. }
  2037. fail:
  2038. splx(s);
  2039. if (flags & FWRITE)
  2040. rw_exit_write(&pf_consistency_lock);
  2041. else
  2042. rw_exit_read(&pf_consistency_lock);
  2043. return (error);
  2044. }
  2045. void
  2046. pf_trans_set_commit(void)
  2047. {
  2048. if (pf_trans_set.mask & PF_TSET_STATUSIF)
  2049. strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
  2050. if (pf_trans_set.mask & PF_TSET_DEBUG)
  2051. pf_status.debug = pf_trans_set.debug;
  2052. if (pf_trans_set.mask & PF_TSET_HOSTID)
  2053. pf_status.hostid = pf_trans_set.hostid;
  2054. if (pf_trans_set.mask & PF_TSET_REASS)
  2055. pf_status.reass = pf_trans_set.reass;
  2056. }
  2057. void
  2058. pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
  2059. {
  2060. bcopy(from, to, sizeof(*to));
  2061. to->kif = NULL;
  2062. }
/*
 * Copy a rule handed in from userland into a kernel rule structure,
 * resolving all symbolic names (interfaces, queues, tags, overload
 * table) to kernel objects and validating routing-table references.
 *
 * Returns 0 on success, EINVAL on failed interface/table attachment,
 * EBUSY on failed rtable/queue/tag lookups.
 *
 * NOTE(review): on an error return, references already acquired by the
 * earlier pf_kif_setup()/pfr_attach_table()/pf_tagname2tag() calls are
 * not released here — presumably the caller's error path frees the
 * partially-built rule; confirm against the DIOCADDRULE/DIOCCHANGERULE
 * handlers.
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
    struct pf_ruleset *ruleset)
{
	int i;

	/* Address blocks are copied wholesale. */
	to->src = from->src;
	to->dst = from->dst;

	/* XXX union skip[] */

	/* Bounded string copies of all name fields. */
	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	/* Pools are copied with their kernel kif pointers cleared. */
	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	/* Resolve interface names to kernel interface objects. */
	if (pf_kif_setup(to->ifname, &to->kif))
		return (EINVAL);
	if (pf_kif_setup(to->rcv_ifname, &to->rcv_kif))
		return (EINVAL);

	/* Attach the overload table, if one was named, and activate it. */
	if (to->overload_tblname[0]) {
		if ((to->overload_tbl = pfr_attach_table(ruleset,
		    to->overload_tblname, 0)) == NULL)
			return (EINVAL);
		else
			to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	}

	/* Per-pool interfaces are resolved after the pool copy above. */
	if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif))
		return (EINVAL);
	if (pf_kif_setup(to->nat.ifname, &to->nat.kif))
		return (EINVAL);
	if (pf_kif_setup(to->route.ifname, &to->route.kif))
		return (EINVAL);

	to->os_fingerprint = from->os_fingerprint;

	/* Routing table must exist; negative means "unset". */
	to->rtableid = from->rtableid;
	if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
		return (EBUSY);
	to->onrdomain = from->onrdomain;
	if (to->onrdomain >= 0 && !rtable_exists(to->onrdomain))
		return (EBUSY);
	if (to->onrdomain >= 0)		/* make sure it is a real rdomain */
		to->onrdomain = rtable_l2(to->onrdomain);

	for (i = 0; i < PFTM_MAX; i++)
		to->timeout[i] = from->timeout[i];
	to->states_tot = from->states_tot;
	to->max_states = from->max_states;
	to->max_src_nodes = from->max_src_nodes;
	to->max_src_states = from->max_src_states;
	to->max_src_conn = from->max_src_conn;
	to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
	to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;

	/*
	 * Resolve queue names to queue ids.  The priority queue defaults
	 * to the main queue; note pqname is only consulted when a qname
	 * was given.
	 */
	if (to->qname[0] != 0) {
		if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
			return (EBUSY);
		if (to->pqname[0] != 0) {
			if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
				return (EBUSY);
		} else
			to->pqid = to->qid;
	}
	to->rt_listid = from->rt_listid;
	to->prob = from->prob;
	to->return_icmp = from->return_icmp;
	to->return_icmp6 = from->return_icmp6;
	to->max_mss = from->max_mss;

	/* Resolve tag names, taking a reference (create if needed). */
	if (to->tagname[0])
		if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
			return (EBUSY);
	if (to->match_tagname[0])
		if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
			return (EBUSY);
	to->scrub_flags = from->scrub_flags;
	to->uid = from->uid;
	to->gid = from->gid;
	to->rule_flag = from->rule_flag;
	to->action = from->action;
	to->direction = from->direction;
	to->log = from->log;
	to->logif = from->logif;
#if NPFLOG > 0
	/* A log interface is meaningless if logging is off. */
	if (!to->log)
		to->logif = 0;
#endif	/* NPFLOG > 0 */
	to->quick = from->quick;
	to->ifnot = from->ifnot;
	to->rcvifnot = from->rcvifnot;
	to->match_tag_not = from->match_tag_not;
	to->keep_state = from->keep_state;
	to->af = from->af;
	to->naf = from->naf;
	to->proto = from->proto;
	to->type = from->type;
	to->code = from->code;
	to->flags = from->flags;
	to->flagset = from->flagset;
	to->min_ttl = from->min_ttl;
	to->allow_opts = from->allow_opts;
	to->rt = from->rt;
	to->return_ttl = from->return_ttl;
	to->tos = from->tos;
	to->set_tos = from->set_tos;
	to->anchor_relative = from->anchor_relative;	/* XXX */
	to->anchor_wildcard = from->anchor_wildcard;	/* XXX */
	to->flush = from->flush;
	to->divert.addr = from->divert.addr;
	to->divert.port = from->divert.port;
	to->divert_packet.addr = from->divert_packet.addr;
	to->divert_packet.port = from->divert_packet.port;
	to->prio = from->prio;
	to->set_prio[0] = from->set_prio[0];
	to->set_prio[1] = from->set_prio[1];

	return (0);
}