frag6.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052
  1. /*-
  2. * SPDX-License-Identifier: BSD-3-Clause
  3. *
  4. * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
  5. * All rights reserved.
  6. * Copyright (c) 2019 Netflix, Inc.
  7. *
  8. * Redistribution and use in source and binary forms, with or without
  9. * modification, are permitted provided that the following conditions
  10. * are met:
  11. * 1. Redistributions of source code must retain the above copyright
  12. * notice, this list of conditions and the following disclaimer.
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. * 3. Neither the name of the project nor the names of its contributors
  17. * may be used to endorse or promote products derived from this software
  18. * without specific prior written permission.
  19. *
  20. * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
  21. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  22. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  23. * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
  24. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  25. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  26. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  27. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  28. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  29. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  30. * SUCH DAMAGE.
  31. *
  32. * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
  33. */
  34. #include <sys/cdefs.h>
  35. __FBSDID("$FreeBSD$");
  36. #include "opt_rss.h"
  37. #include <sys/param.h>
  38. #include <sys/systm.h>
  39. #include <sys/domain.h>
  40. #include <sys/eventhandler.h>
  41. #include <sys/hash.h>
  42. #include <sys/kernel.h>
  43. #include <sys/malloc.h>
  44. #include <sys/mbuf.h>
  45. #include <sys/protosw.h>
  46. #include <sys/queue.h>
  47. #include <sys/socket.h>
  48. #include <sys/sysctl.h>
  49. #include <sys/syslog.h>
  50. #include <net/if.h>
  51. #include <net/if_var.h>
  52. #include <net/netisr.h>
  53. #include <net/route.h>
  54. #include <net/vnet.h>
  55. #include <netinet/in.h>
  56. #include <netinet/in_var.h>
  57. #include <netinet/ip6.h>
  58. #include <netinet6/ip6_var.h>
  59. #include <netinet/icmp6.h>
  60. #include <netinet/in_systm.h> /* For ECN definitions. */
  61. #include <netinet/ip.h> /* For ECN definitions. */
  62. #ifdef MAC
  63. #include <security/mac/mac_framework.h>
  64. #endif
/*
 * A "big picture" of how IPv6 fragment queues are all linked together.
 *
 * struct ip6qbucket ip6qb[...];               hashed buckets
 * ||||||||
 * |
 * +--- TAILQ(struct ip6q, packets) *q6;      tailq entries holding
 *      ||||||||                              fragmented packets
 *      |                                     (1 per original packet)
 *      |
 *      +--- TAILQ(struct ip6asfrag, ip6q_frags) *af6;  tailq entries of IPv6
 *           |                                *ip6af; fragment packets
 *           |                                for one original packet
 *           + *mbuf
 */
/* Reassembly headers are stored in hash buckets. */
#define	IP6REASS_NHASH_LOG2	10
#define	IP6REASS_NHASH		(1 << IP6REASS_NHASH_LOG2)
#define	IP6REASS_HMASK		(IP6REASS_NHASH - 1)

/* Head type for a list of reassembly queues (one struct ip6q per packet). */
TAILQ_HEAD(ip6qhead, ip6q);

/*
 * One hash bucket: the list of in-progress reassembly queues hashing here,
 * the mutex protecting that list, and the current number of queues in it.
 */
struct ip6qbucket {
	struct ip6qhead	packets;	/* Reassembly queues in this bucket. */
	struct mtx	lock;		/* Protects the list and count. */
	int		count;		/* Number of queues in the bucket. */
};

/* One received fragment, linked on its packet's reassembly queue. */
struct ip6asfrag {
	TAILQ_ENTRY(ip6asfrag) ip6af_tq;
	struct mbuf	*ip6af_m;
	int		ip6af_offset;	/* Offset in ip6af_m to next header. */
	int		ip6af_frglen;	/* Fragmentable part length. */
	int		ip6af_off;	/* Fragment offset. */
	bool		ip6af_mff;	/* More fragment bit in frag off. */
};

static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header");

#ifdef VIMAGE
/* A flag to indicate if IPv6 fragmentation is initialized. */
VNET_DEFINE_STATIC(bool, frag6_on);
#define	V_frag6_on	VNET(frag6_on)
#endif

/* System wide (global) maximum and count of packets in reassembly queues. */
static int ip6_maxfrags;
static volatile u_int frag6_nfrags = 0;

/* Maximum and current packets in per-VNET reassembly queue. */
VNET_DEFINE_STATIC(int, ip6_maxfragpackets);
VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets);
#define	V_ip6_maxfragpackets	VNET(ip6_maxfragpackets)
#define	V_frag6_nfragpackets	VNET(frag6_nfragpackets)

/* Maximum per-VNET reassembly queues per bucket and fragments per packet. */
VNET_DEFINE_STATIC(int, ip6_maxfragbucketsize);
VNET_DEFINE_STATIC(int, ip6_maxfragsperpacket);
#define	V_ip6_maxfragbucketsize	VNET(ip6_maxfragbucketsize)
#define	V_ip6_maxfragsperpacket	VNET(ip6_maxfragsperpacket)

/* Per-VNET reassembly queue buckets. */
VNET_DEFINE_STATIC(struct ip6qbucket, ip6qb[IP6REASS_NHASH]);
VNET_DEFINE_STATIC(uint32_t, ip6qb_hashseed);
#define	V_ip6qb		VNET(ip6qb)
#define	V_ip6qb_hashseed	VNET(ip6qb_hashseed)

/* Convenience wrappers for per-bucket locking and list-head access. */
#define	IP6QB_LOCK(_b)		mtx_lock(&V_ip6qb[(_b)].lock)
#define	IP6QB_TRYLOCK(_b)	mtx_trylock(&V_ip6qb[(_b)].lock)
#define	IP6QB_LOCK_ASSERT(_b)	mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED)
#define	IP6QB_UNLOCK(_b)	mtx_unlock(&V_ip6qb[(_b)].lock)
#define	IP6QB_HEAD(_b)		(&V_ip6qb[(_b)].packets)
/*
 * By default, limit the number of IP6 fragments across all reassembly
 * queues to  1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP6 fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items. (The bucket limit is
 * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP6_MAXFRAGS		(nmbclusters / 32)
#define	IP6_MAXFRAGPACKETS	(imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))

/*
 * Sysctls and helper function.
 */
SYSCTL_DECL(_net_inet6_ip6);

/* Read-only view of the global fragment counter updated with atomics. */
SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfrags,
    CTLFLAG_RD, __DEVOLATILE(u_int *, &frag6_nfrags), 0,
    "Global number of IPv6 fragments across all reassembly queues.");
  149. static void
  150. frag6_set_bucketsize(void)
  151. {
  152. int i;
  153. if ((i = V_ip6_maxfragpackets) > 0)
  154. V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
  155. }
  156. SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags,
  157. CTLFLAG_RW, &ip6_maxfrags, 0,
  158. "Maximum allowed number of outstanding IPv6 packet fragments. "
  159. "A value of 0 means no fragmented packets will be accepted, while a "
  160. "a value of -1 means no limit");
  161. static int
  162. sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS)
  163. {
  164. int error, val;
  165. val = V_ip6_maxfragpackets;
  166. error = sysctl_handle_int(oidp, &val, 0, req);
  167. if (error != 0 || !req->newptr)
  168. return (error);
  169. V_ip6_maxfragpackets = val;
  170. frag6_set_bucketsize();
  171. return (0);
  172. }
  173. SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
  174. CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
  175. NULL, 0, sysctl_ip6_maxfragpackets, "I",
  176. "Default maximum number of outstanding fragmented IPv6 packets. "
  177. "A value of 0 means no fragmented packets will be accepted, while a "
  178. "a value of -1 means no limit");
  179. SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfragpackets,
  180. CTLFLAG_VNET | CTLFLAG_RD,
  181. __DEVOLATILE(u_int *, &VNET_NAME(frag6_nfragpackets)), 0,
  182. "Per-VNET number of IPv6 fragments across all reassembly queues.");
  183. SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket,
  184. CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0,
  185. "Maximum allowed number of fragments per packet");
  186. SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize,
  187. CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0,
  188. "Maximum number of reassembly queues per hash bucket");
/*
 * Remove the IPv6 fragmentation header from the mbuf.
 *
 * 'offset' is the byte offset of the fragment header; all headers up to it
 * must be contiguous in the first mbuf (asserted below).  Always returns 0.
 */
int
ip6_deletefraghdr(struct mbuf *m, int offset, int wait __unused)
{
	struct ip6_hdr *ip6;

	KASSERT(m->m_len >= offset + sizeof(struct ip6_frag),
	    ("%s: ext headers not contigous in mbuf %p m_len %d >= "
	    "offset %d + %zu\n", __func__, m, m->m_len, offset,
	    sizeof(struct ip6_frag)));

	/* Delete frag6 header. */
	ip6 = mtod(m, struct ip6_hdr *);
	/*
	 * Slide the unfragmentable part (first 'offset' bytes) forward over
	 * the fragment header, then trim the now-dead leading bytes.
	 */
	bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag), offset);
	m->m_data += sizeof(struct ip6_frag);
	m->m_len -= sizeof(struct ip6_frag);
	m->m_flags |= M_FRAGMENTED;
	return (0);
}
/*
 * Free a fragment reassembly header and all associated datagrams.
 *
 * Must be called with the bucket lock held.  Unlinks q6 from its bucket,
 * rolls back the bucket, global-fragment, and per-VNET packet accounting,
 * and either frees each fragment or (for the first fragment with a valid
 * rcvif) reports it back to the sender via ICMPv6 time exceeded.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
	struct ip6_hdr *ip6;
	struct ip6asfrag *af6;
	struct mbuf *m;

	IP6QB_LOCK_ASSERT(bucket);

	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m = af6->ip6af_m;
		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0 && m->m_pkthdr.rcvif != NULL) {
			/* Adjust pointer. */
			ip6 = mtod(m, struct ip6_hdr *);

			/* Restore source and destination addresses. */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);

		free(af6, M_FRAG6);
	}

	TAILQ_REMOVE(IP6QB_HEAD(bucket), q6, ip6q_tq);
	V_ip6qb[bucket].count--;
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
}
/*
 * Drain off all datagram fragments belonging to
 * the given network interface.
 *
 * Registered as an ifnet_departure_event handler: rather than freeing
 * queued fragments, it only clears their rcvif pointers so they no longer
 * reference the departing interface; the slow timer reclaims them later.
 */
static void
frag6_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6;
	uint32_t bucket;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);
#ifdef VIMAGE
	/*
	 * Skip processing if IPv6 reassembly is not initialised or
	 * torn down by frag6_destroy().
	 */
	if (!V_frag6_on) {
		CURVNET_RESTORE();
		return;
	}
#endif

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		head = IP6QB_HEAD(bucket);
		/* Scan fragment list. */
		TAILQ_FOREACH(q6, head, ip6q_tq) {
			TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
				/* Clear no longer valid rcvif pointer. */
				if (af6->ip6af_m->m_pkthdr.rcvif == ifp)
					af6->ip6af_m->m_pkthdr.rcvif = NULL;
			}
		}
		IP6QB_UNLOCK(bucket);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, frag6_cleanup, NULL, 0);
  285. /*
  286. * Like in RFC2460, in RFC8200, fragment and reassembly rules do not agree with
  287. * each other, in terms of next header field handling in fragment header.
  288. * While the sender will use the same value for all of the fragmented packets,
  289. * receiver is suggested not to check for consistency.
  290. *
  291. * Fragment rules (p18,p19):
  292. * (2) A Fragment header containing:
  293. * The Next Header value that identifies the first header
  294. * after the Per-Fragment headers of the original packet.
  295. * -> next header field is same for all fragments
  296. *
  297. * Reassembly rule (p20):
  298. * The Next Header field of the last header of the Per-Fragment
  299. * headers is obtained from the Next Header field of the first
  300. * fragment's Fragment header.
  301. * -> should grab it from the first fragment only
  302. *
 * The following note also contradicts the fragment rule - no one is going to
 * send different fragments with different next header fields.
  305. *
  306. * Additional note (p22) [not an error]:
  307. * The Next Header values in the Fragment headers of different
  308. * fragments of the same original packet may differ. Only the value
  309. * from the Offset zero fragment packet is used for reassembly.
  310. * -> should grab it from the first fragment only
  311. *
  312. * There is no explicit reason given in the RFC. Historical reason maybe?
  313. */
/*
 * Fragment input.
 *
 * Called with *mp pointing at the packet and *offp at the offset of the
 * fragment header.  On complete reassembly, returns the original
 * next-header value with *mp/*offp updated to describe the reassembled
 * packet.  Otherwise the mbuf is consumed (queued for reassembly, handed
 * to icmp6_error(), or freed) and IPPROTO_DONE is returned with *mp NULL.
 * Atomic fragments (RFC 6946) are unwrapped and returned immediately.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6tmp;
	struct in6_ifaddr *ia6;
	struct ifnet *dstifp, *srcifp;
	/* Hash key: source address, destination address, fragment ident. */
	uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
		    sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
	uint32_t bucket, *hashkeyp;
	int fragoff, frgpartlen;	/* Must be larger than uint16_t. */
	int nxt, offset, plen;
	uint8_t ecn, ecn0;
	bool only_frag;
#ifdef RSS
	struct ip6_direct_ctx *ip6dc;
	struct m_tag *mtag;
#endif

	m = *mp;
	offset = *offp;

	M_ASSERTPKTHDR(m);

	/* Make sure the full fragment header is contiguous. */
	if (m->m_len < offset + sizeof(struct ip6_frag)) {
		m = m_pullup(m, offset + sizeof(struct ip6_frag));
		if (m == NULL) {
			IP6STAT_INC(ip6s_exthdrtoolong);
			*mp = NULL;
			return (IPPROTO_DONE);
		}
	}
	ip6 = mtod(m, struct ip6_hdr *);

	dstifp = NULL;
	/* Find the destination interface of the packet. */
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 != NULL) {
		dstifp = ia6->ia_ifp;
		ifa_free(&ia6->ia_ifa);
	}

	/* Jumbo payload cannot contain a fragment header. */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	/*
	 * Check whether fragment packet's fragment length is a
	 * multiple of 8 octets (unless it is the last one).
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) = 40
	 */
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	IP6STAT_INC(ip6s_fragments);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/*
	 * Handle "atomic" fragments (offset and m bit set to 0) upfront,
	 * unrelated to any reassembly.  We need to remove the frag hdr
	 * which is ugly.
	 * See RFC 6946 and section 4.5 of RFC 8200.
	 */
	if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
		IP6STAT_INC(ip6s_atomicfrags);
		nxt = ip6f->ip6f_nxt;
		/*
		 * Set nxt(-hdr field value) to the original value.
		 * We cannot just set ip6->ip6_nxt as there might be
		 * an unfragmentable part with extension headers and
		 * we must update the last one.
		 */
		m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
		    (caddr_t)&nxt);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) -
		    sizeof(struct ip6_frag));
		if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0)
			goto dropfrag2;
		m->m_pkthdr.len -= sizeof(struct ip6_frag);
		in6_ifstat_inc(dstifp, ifs6_reass_ok);
		*mp = m;
		return (nxt);
	}

	/* Offset now points to data portion. */
	offset += sizeof(struct ip6_frag);

	/* Get fragment length and discard 0-byte fragments. */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (frgpartlen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		IP6STAT_INC(ip6s_fragdropped);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	/*
	 * Enforce upper bound on number of fragments for the entire system.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
		goto dropfrag2;

	/*
	 * Validate that a full header chain to the ULP is present in the
	 * packet containing the first fragment as per RFC RFC7112 and
	 * RFC 8200 pages 18,19:
	 * The first fragment packet is composed of:
	 * (3)  Extension headers, if any, and the Upper-Layer header.  These
	 *      headers must be in the first fragment.  ...
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	/* XXX TODO.  thj has D16851 open for this. */
	/* Send ICMPv6 4,3 in case of violation. */

	/* Store receive network interface pointer for later. */
	srcifp = m->m_pkthdr.rcvif;

	/* Generate a hash value for fragment bucket selection. */
	hashkeyp = hashkey;
	memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	*hashkeyp = ip6f->ip6f_ident;
	bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed);
	bucket &= IP6REASS_HMASK;
	IP6QB_LOCK(bucket);
	head = IP6QB_HEAD(bucket);

	/* Look up an existing reassembly queue for this (src, dst, ident). */
	TAILQ_FOREACH(q6, head, ip6q_tq)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
#ifdef MAC
		    && mac_ip6q_match(m, q6)
#endif
		    )
			break;

	only_frag = false;
	if (q6 == NULL) {
		/* A first fragment to arrive creates a reassembly queue. */
		only_frag = true;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (V_ip6_maxfragpackets < 0)
			;
		else if (V_ip6qb[bucket].count >= V_ip6_maxfragbucketsize ||
		    atomic_load_int(&V_frag6_nfragpackets) >=
		    (u_int)V_ip6_maxfragpackets)
			goto dropfrag;

		/* Allocate IPv6 fragement packet queue entry. */
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FRAG6,
		    M_NOWAIT | M_ZERO);
		if (q6 == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
			free(q6, M_FRAG6);
			goto dropfrag;
		}
		mac_ip6q_create(m, q6);
#endif
		atomic_add_int(&V_frag6_nfragpackets, 1);

		/* ip6q_nxt will be filled afterwards, from 1st fragment. */
		TAILQ_INIT(&q6->ip6q_frags);
		q6->ip6q_ident	= ip6f->ip6f_ident;
		q6->ip6q_ttl	= IPV6_FRAGTTL;
		q6->ip6q_src	= ip6->ip6_src;
		q6->ip6q_dst	= ip6->ip6_dst;
		q6->ip6q_ecn	=
		    (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		/* Add the fragemented packet to the bucket. */
		TAILQ_INSERT_HEAD(head, q6, ip6q_tq);
		V_ip6qb[bucket].count++;
	}

	/*
	 * If it is the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 * Assume the first 1st fragement to arrive will be correct.
	 * We do not have any duplicate checks here yet so another packet
	 * with fragoff == 0 could come and overwrite the ip6q_unfrglen
	 * and worse, the next header, at any time.
	 */
	if (fragoff == 0 && q6->ip6q_unfrglen == -1) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
		/* XXX ECN? */
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			/* Undo queue creation if we just created it. */
			if (only_frag) {
				TAILQ_REMOVE(head, q6, ip6q_tq);
				V_ip6qb[bucket].count--;
				atomic_subtract_int(&V_frag6_nfragpackets, 1);
#ifdef MAC
				mac_ip6q_destroy(q6);
#endif
				free(q6, M_FRAG6);
			}
			IP6QB_UNLOCK(bucket);
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			*mp = NULL;
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		/* Undo queue creation if we just created it. */
		if (only_frag) {
			TAILQ_REMOVE(head, q6, ip6q_tq);
			V_ip6qb[bucket].count--;
			atomic_subtract_int(&V_frag6_nfragpackets, 1);
#ifdef MAC
			mac_ip6q_destroy(q6);
#endif
			free(q6, M_FRAG6);
		}
		IP6QB_UNLOCK(bucket);
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	/*
	 * If it is the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0 && !only_frag) {
		TAILQ_FOREACH_SAFE(af6, &q6->ip6q_frags, ip6af_tq, af6tmp) {
			if (q6->ip6q_unfrglen + af6->ip6af_off +
			    af6->ip6af_frglen > IPV6_MAXPACKET) {
				struct ip6_hdr *ip6err;
				struct mbuf *merr;
				int erroff;

				merr = af6->ip6af_m;
				erroff = af6->ip6af_offset;

				/* Dequeue the fragment. */
				TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
				q6->ip6q_nfrag--;
				atomic_subtract_int(&frag6_nfrags, 1);
				free(af6, M_FRAG6);

				/* Set a valid receive interface pointer. */
				merr->m_pkthdr.rcvif = srcifp;

				/* Adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	/* Allocate an IPv6 fragement queue entry for this fragmented part. */
	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FRAG6,
	    M_NOWAIT | M_ZERO);
	if (ip6af == NULL)
		goto dropfrag;
	ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) ? true : false;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	ip6af->ip6af_m = m;

	if (only_frag) {
		/*
		 * Do a manual insert rather than a hard-to-understand cast
		 * to a different type relying on data structure order to work.
		 */
		TAILQ_INSERT_HEAD(&q6->ip6q_frags, ip6af, ip6af_tq);
		goto postinsert;
	}

	/* Do duplicate, condition, and boundry checks. */

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		free(ip6af, M_FRAG6);
		goto dropfrag;
	}

	/* Find a fragmented part which begins after this one does. */
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

	/*
	 * If the incoming framgent overlaps some existing fragments in
	 * the reassembly queue, drop both the new fragment and the
	 * entire reassembly queue.  However, if the new fragment
	 * is an exact duplicate of an existing fragment, only silently
	 * drop the existing fragment and leave the fragmentation queue
	 * unchanged, as allowed by the RFC.  (RFC 8200, 4.5)
	 */
	if (af6 != NULL)
		af6tmp = TAILQ_PREV(af6, ip6fraghead, ip6af_tq);
	else
		af6tmp = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6tmp != NULL) {
		/* Overlap with the preceding fragment? */
		if (af6tmp->ip6af_off + af6tmp->ip6af_frglen -
		    ip6af->ip6af_off > 0) {
			if (af6tmp->ip6af_off != ip6af->ip6af_off ||
			    af6tmp->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
	if (af6 != NULL) {
		/* Overlap with the following fragment? */
		if (ip6af->ip6af_off + ip6af->ip6af_frglen -
		    af6->ip6af_off > 0) {
			if (af6->ip6af_off != ip6af->ip6af_off ||
			    af6->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}

#ifdef MAC
	mac_ip6q_update(m, q6);
#endif

	/*
	 * Stick new segment in its place; check for complete reassembly.
	 * If not complete, check fragment limit.  Move to front of packet
	 * queue, as we are the most recently active fragmented packet.
	 */
	if (af6 != NULL)
		TAILQ_INSERT_BEFORE(af6, ip6af, ip6af_tq);
	else
		TAILQ_INSERT_TAIL(&q6->ip6q_frags, ip6af, ip6af_tq);
postinsert:
	atomic_add_int(&frag6_nfrags, 1);
	q6->ip6q_nfrag++;

	/* Walk the queue; a gap (off != running length) means incomplete. */
	plen = 0;
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
		if (af6->ip6af_off != plen) {
			if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
				IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			*mp = NULL;
			return (IPPROTO_DONE);
		}
		plen += af6->ip6af_frglen;
	}
	/* The last fragment must have its "more fragments" bit clear. */
	af6 = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6->ip6af_mff) {
		if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
			IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	/* Reassembly is complete; concatenate fragments. */
	ip6af = TAILQ_FIRST(&q6->ip6q_frags);
	t = m = ip6af->ip6af_m;
	TAILQ_REMOVE(&q6->ip6q_frags, ip6af, ip6af_tq);
	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		/* Merge checksum state from each fragment's mbuf. */
		m->m_pkthdr.csum_flags &=
		    af6->ip6af_m->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data +=
		    af6->ip6af_m->m_pkthdr.csum_data;

		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
		t = m_last(t);
		m_adj(af6->ip6af_m, af6->ip6af_offset);
		m_demote_pkthdr(af6->ip6af_m);
		m_cat(t, af6->ip6af_m);
		free(af6, M_FRAG6);
	}

	/* Fold accumulated checksum data back into 16 bits. */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);

	/* Adjust offset to point where the original next header starts. */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FRAG6);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)plen + offset - sizeof(struct ip6_hdr));
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
	nxt = q6->ip6q_nxt;

	TAILQ_REMOVE(head, q6, ip6q_tq);
	V_ip6qb[bucket].count--;
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);

	ip6_deletefraghdr(m, offset, M_NOWAIT);

	/* Set nxt(-hdr field value) to the original value. */
	m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
	    (caddr_t)&nxt);

#ifdef MAC
	mac_ip6q_reassemble(q6, m);
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */

		plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
		/* Set a valid receive interface pointer. */
		m->m_pkthdr.rcvif = srcifp;
	}

#ifdef RSS
	mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
	    M_NOWAIT);
	if (mtag == NULL)
		goto dropfrag;

	ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
	ip6dc->ip6dc_nxt = nxt;
	ip6dc->ip6dc_off = offset;

	m_tag_prepend(m, mtag);
#endif

	IP6QB_UNLOCK(bucket);
	IP6STAT_INC(ip6s_reassembled);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

#ifdef RSS
	/* Queue/dispatch for reprocessing. */
	netisr_dispatch(NETISR_IPV6_DIRECT, m);
	*mp = NULL;
	return (IPPROTO_DONE);
#endif

	/* Tell launch routine the next header. */
	*mp = m;
	*offp = offset;

	return (nxt);

dropfrag:
	IP6QB_UNLOCK(bucket);
dropfrag2:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6STAT_INC(ip6s_fragdropped);
	m_freem(m);
	*mp = NULL;
	return (IPPROTO_DONE);
}
/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
void
frag6_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6qhead *head;
	struct ip6q *q6, *q6tmp;
	uint32_t bucket;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
			IP6QB_LOCK(bucket);
			head = IP6QB_HEAD(bucket);
			/*
			 * Age every queue in this bucket; once a queue's TTL
			 * hits zero it is freed together with all of its
			 * fragments (FOREACH_SAFE allows removal mid-walk).
			 */
			TAILQ_FOREACH_SAFE(q6, head, ip6q_tq, q6tmp)
			if (--q6->ip6q_ttl == 0) {
				IP6STAT_ADD(ip6s_fragtimeout,
				    q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			/*
			 * If we are over the maximum number of fragments
			 * (due to the limit being lowered), drain off
			 * enough to get down to the new limit.
			 * Note that we drain all reassembly queues if
			 * maxfragpackets is 0 (fragmentation is disabled),
			 * and do not enforce a limit when maxfragpackets
			 * is negative.
			 */
			while ((V_ip6_maxfragpackets == 0 ||
			    (V_ip6_maxfragpackets > 0 &&
			    V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) &&
			    (q6 = TAILQ_LAST(head, ip6qhead)) != NULL) {
				/* Evict from the tail, i.e. the oldest queue. */
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
		}
		/*
		 * If we are still over the maximum number of fragmented
		 * packets, drain off enough to get down to the new limit.
		 */
		bucket = 0;
		/* Round-robin over the buckets until under the per-VNET cap. */
		while (V_ip6_maxfragpackets >= 0 &&
		    atomic_load_int(&V_frag6_nfragpackets) >
		    (u_int)V_ip6_maxfragpackets) {
			IP6QB_LOCK(bucket);
			q6 = TAILQ_LAST(IP6QB_HEAD(bucket), ip6qhead);
			if (q6 != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			bucket = (bucket + 1) % IP6REASS_NHASH;
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
  852. /*
  853. * Eventhandler to adjust limits in case nmbclusters change.
  854. */
  855. static void
  856. frag6_change(void *tag)
  857. {
  858. VNET_ITERATOR_DECL(vnet_iter);
  859. ip6_maxfrags = IP6_MAXFRAGS;
  860. VNET_LIST_RLOCK_NOSLEEP();
  861. VNET_FOREACH(vnet_iter) {
  862. CURVNET_SET(vnet_iter);
  863. V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
  864. frag6_set_bucketsize();
  865. CURVNET_RESTORE();
  866. }
  867. VNET_LIST_RUNLOCK_NOSLEEP();
  868. }
  869. /*
  870. * Initialise reassembly queue and fragment identifier.
  871. */
  872. void
  873. frag6_init(void)
  874. {
  875. uint32_t bucket;
  876. V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
  877. frag6_set_bucketsize();
  878. for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
  879. TAILQ_INIT(IP6QB_HEAD(bucket));
  880. mtx_init(&V_ip6qb[bucket].lock, "ip6qb", NULL, MTX_DEF);
  881. V_ip6qb[bucket].count = 0;
  882. }
  883. V_ip6qb_hashseed = arc4random();
  884. V_ip6_maxfragsperpacket = 64;
  885. #ifdef VIMAGE
  886. V_frag6_on = true;
  887. #endif
  888. if (!IS_DEFAULT_VNET(curvnet))
  889. return;
  890. ip6_maxfrags = IP6_MAXFRAGS;
  891. EVENTHANDLER_REGISTER(nmbclusters_change,
  892. frag6_change, NULL, EVENTHANDLER_PRI_ANY);
  893. }
  894. /*
  895. * Drain off all datagram fragments.
  896. */
  897. static void
  898. frag6_drain_one(void)
  899. {
  900. struct ip6q *q6;
  901. uint32_t bucket;
  902. for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
  903. IP6QB_LOCK(bucket);
  904. while ((q6 = TAILQ_FIRST(IP6QB_HEAD(bucket))) != NULL) {
  905. IP6STAT_INC(ip6s_fragdropped);
  906. /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
  907. frag6_freef(q6, bucket);
  908. }
  909. IP6QB_UNLOCK(bucket);
  910. }
  911. }
  912. void
  913. frag6_drain(void)
  914. {
  915. VNET_ITERATOR_DECL(vnet_iter);
  916. VNET_LIST_RLOCK_NOSLEEP();
  917. VNET_FOREACH(vnet_iter) {
  918. CURVNET_SET(vnet_iter);
  919. frag6_drain_one();
  920. CURVNET_RESTORE();
  921. }
  922. VNET_LIST_RUNLOCK_NOSLEEP();
  923. }
  924. #ifdef VIMAGE
  925. /*
  926. * Clear up IPv6 reassembly structures.
  927. */
  928. void
  929. frag6_destroy(void)
  930. {
  931. uint32_t bucket;
  932. frag6_drain_one();
  933. V_frag6_on = false;
  934. for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
  935. KASSERT(V_ip6qb[bucket].count == 0,
  936. ("%s: V_ip6qb[%d] (%p) count not 0 (%d)", __func__,
  937. bucket, &V_ip6qb[bucket], V_ip6qb[bucket].count));
  938. mtx_destroy(&V_ip6qb[bucket].lock);
  939. }
  940. }
  941. #endif