uipc_mbuf.c

  1. /* $OpenBSD: uipc_mbuf.c,v 1.206 2015/07/15 22:29:32 deraadt Exp $ */
  2. /* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
  3. /*
  4. * Copyright (c) 1982, 1986, 1988, 1991, 1993
  5. * The Regents of the University of California. All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions
  9. * are met:
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. * 3. Neither the name of the University nor the names of its contributors
  16. * may be used to endorse or promote products derived from this software
  17. * without specific prior written permission.
  18. *
  19. * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  20. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  21. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  22. * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  23. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  24. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  25. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  26. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  27. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  28. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  29. * SUCH DAMAGE.
  30. *
  31. * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
  32. */
  33. /*
  34. * @(#)COPYRIGHT 1.1 (NRL) 17 January 1995
  35. *
  36. * NRL grants permission for redistribution and use in source and binary
  37. * forms, with or without modification, of the software and documentation
  38. * created at NRL provided that the following conditions are met:
  39. *
  40. * 1. Redistributions of source code must retain the above copyright
  41. * notice, this list of conditions and the following disclaimer.
  42. * 2. Redistributions in binary form must reproduce the above copyright
  43. * notice, this list of conditions and the following disclaimer in the
  44. * documentation and/or other materials provided with the distribution.
  45. * 3. All advertising materials mentioning features or use of this software
  46. * must display the following acknowledgements:
  47. * This product includes software developed by the University of
  48. * California, Berkeley and its contributors.
  49. * This product includes software developed at the Information
  50. * Technology Division, US Naval Research Laboratory.
  51. * 4. Neither the name of the NRL nor the names of its contributors
  52. * may be used to endorse or promote products derived from this software
  53. * without specific prior written permission.
  54. *
  55. * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
  56. * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  57. * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
  58. * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR
  59. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  60. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  61. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  62. * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  63. * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  64. * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  65. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  66. *
  67. * The views and conclusions contained in the software and documentation
  68. * are those of the authors and should not be interpreted as representing
  69. * official policies, either expressed or implied, of the US Naval
  70. * Research Laboratory (NRL).
  71. */
  72. #include <sys/param.h>
  73. #include <sys/systm.h>
  74. #include <sys/malloc.h>
  75. #include <sys/mbuf.h>
  76. #include <sys/kernel.h>
  77. #include <sys/syslog.h>
  78. #include <sys/domain.h>
  79. #include <sys/protosw.h>
  80. #include <sys/pool.h>
  81. #include <sys/socket.h>
  82. #include <sys/socketvar.h>
  83. #include <net/if.h>
  84. #include <uvm/uvm_extern.h>
  85. #ifdef DDB
  86. #include <machine/db_machdep.h>
  87. #endif
  88. struct mbstat mbstat; /* mbuf stats */
  89. struct mutex mbstatmtx = MUTEX_INITIALIZER(IPL_NET);
  90. struct pool mbpool; /* mbuf pool */
  91. struct pool mtagpool;
  92. /* mbuf cluster pools */
  93. u_int mclsizes[] = {
  94. MCLBYTES, /* must be at slot 0 */
  95. 4 * 1024,
  96. 8 * 1024,
  97. 9 * 1024,
  98. 12 * 1024,
  99. 16 * 1024,
  100. 64 * 1024
  101. };
  102. static char mclnames[MCLPOOLS][8];
  103. struct pool mclpools[MCLPOOLS];
  104. struct pool *m_clpool(u_int);
  105. int max_linkhdr; /* largest link-level header */
  106. int max_protohdr; /* largest protocol header */
  107. int max_hdr; /* largest link+protocol header */
  108. struct mutex m_extref_mtx = MUTEX_INITIALIZER(IPL_NET);
  109. void m_extfree(struct mbuf *);
  110. struct mbuf *m_copym0(struct mbuf *, int, int, int, int);
  111. void nmbclust_update(void);
  112. void m_zero(struct mbuf *);
  113. const char *mclpool_warnmsg =
  114. "WARNING: mclpools limit reached; increase kern.maxclusters";
  115. /*
  116. * Initialize the mbuf allocator.
  117. */
  118. void
  119. mbinit(void)
  120. {
  121. int i;
  122. #ifdef DIAGNOSTIC
  123. if (mclsizes[0] != MCLBYTES)
  124. panic("mbinit: the smallest cluster size != MCLBYTES");
  125. if (mclsizes[nitems(mclsizes) - 1] != MAXMCLBYTES)
  126. panic("mbinit: the largest cluster size != MAXMCLBYTES");
  127. #endif
  128. pool_init(&mbpool, MSIZE, 0, 0, 0, "mbufpl", NULL);
  129. pool_setipl(&mbpool, IPL_NET);
  130. pool_set_constraints(&mbpool, &kp_dma_contig);
  131. pool_setlowat(&mbpool, mblowat);
  132. pool_init(&mtagpool, PACKET_TAG_MAXSIZE + sizeof(struct m_tag),
  133. 0, 0, 0, "mtagpl", NULL);
  134. pool_setipl(&mtagpool, IPL_NET);
  135. for (i = 0; i < nitems(mclsizes); i++) {
  136. snprintf(mclnames[i], sizeof(mclnames[0]), "mcl%dk",
  137. mclsizes[i] >> 10);
  138. pool_init(&mclpools[i], mclsizes[i], 0, 0, 0,
  139. mclnames[i], NULL);
  140. pool_setipl(&mclpools[i], IPL_NET);
  141. pool_set_constraints(&mclpools[i], &kp_dma_contig);
  142. pool_setlowat(&mclpools[i], mcllowat);
  143. }
  144. nmbclust_update();
  145. }
  146. void
  147. nmbclust_update(void)
  148. {
  149. int i;
  150. /*
  151. * Set the hard limit on the mclpools to the number of
  152. * mbuf clusters the kernel is to support. Log the limit-reached
  153. * message at most once a minute.
  154. */
  155. for (i = 0; i < nitems(mclsizes); i++) {
  156. (void)pool_sethardlimit(&mclpools[i], nmbclust,
  157. mclpool_warnmsg, 60);
  158. /*
  159. * XXX this needs to be reconsidered.
  160. * Setting the high water mark to nmbclust is too high
  161. * but we need to have enough spare buffers around so that
  162. * allocations in interrupt context don't fail or mclgeti()
  163. * drivers may end up with empty rings.
  164. */
  165. pool_sethiwat(&mclpools[i], nmbclust);
  166. }
  167. pool_sethiwat(&mbpool, nmbclust);
  168. }
  169. /*
  170. * Space allocation routines.
  171. */
  172. struct mbuf *
  173. m_get(int nowait, int type)
  174. {
  175. struct mbuf *m;
  176. m = pool_get(&mbpool, nowait == M_WAIT ? PR_WAITOK : PR_NOWAIT);
  177. if (m == NULL)
  178. return (NULL);
  179. mtx_enter(&mbstatmtx);
  180. mbstat.m_mtypes[type]++;
  181. mtx_leave(&mbstatmtx);
  182. m->m_type = type;
  183. m->m_next = NULL;
  184. m->m_nextpkt = NULL;
  185. m->m_data = m->m_dat;
  186. m->m_flags = 0;
  187. return (m);
  188. }
  189. /*
  190. * ATTN: When changing anything here, check m_inithdr() and m_defrag();
  191. * they may need to change as well.
  192. */
  193. struct mbuf *
  194. m_gethdr(int nowait, int type)
  195. {
  196. struct mbuf *m;
  197. m = pool_get(&mbpool, nowait == M_WAIT ? PR_WAITOK : PR_NOWAIT);
  198. if (m == NULL)
  199. return (NULL);
  200. mtx_enter(&mbstatmtx);
  201. mbstat.m_mtypes[type]++;
  202. mtx_leave(&mbstatmtx);
  203. m->m_type = type;
  204. return (m_inithdr(m));
  205. }
  206. struct mbuf *
  207. m_inithdr(struct mbuf *m)
  208. {
  209. /* keep in sync with m_gethdr */
  210. m->m_next = NULL;
  211. m->m_nextpkt = NULL;
  212. m->m_data = m->m_pktdat;
  213. m->m_flags = M_PKTHDR;
  214. memset(&m->m_pkthdr, 0, sizeof(m->m_pkthdr));
  215. m->m_pkthdr.pf.prio = IFQ_DEFPRIO;
  216. return (m);
  217. }
  218. struct mbuf *
  219. m_getclr(int nowait, int type)
  220. {
  221. struct mbuf *m;
  222. MGET(m, nowait, type);
  223. if (m == NULL)
  224. return (NULL);
  225. memset(mtod(m, caddr_t), 0, MLEN);
  226. return (m);
  227. }
  228. struct pool *
  229. m_clpool(u_int pktlen)
  230. {
  231. struct pool *pp;
  232. int pi;
  233. for (pi = 0; pi < nitems(mclpools); pi++) {
  234. pp = &mclpools[pi];
  235. if (pktlen <= pp->pr_size)
  236. return (pp);
  237. }
  238. return (NULL);
  239. }
  240. struct mbuf *
  241. m_clget(struct mbuf *m, int how, u_int pktlen)
  242. {
  243. struct mbuf *m0 = NULL;
  244. struct pool *pp;
  245. caddr_t buf;
  246. pp = m_clpool(pktlen);
  247. #ifdef DIAGNOSTIC
  248. if (pp == NULL)
  249. panic("m_clget: request for %u byte cluster", pktlen);
  250. #endif
  251. if (m == NULL) {
  252. m0 = m_gethdr(how, MT_DATA);
  253. if (m0 == NULL)
  254. return (NULL);
  255. m = m0;
  256. }
  257. buf = pool_get(pp, how == M_WAIT ? PR_WAITOK : PR_NOWAIT);
  258. if (buf == NULL) {
  259. if (m0)
  260. m_freem(m0);
  261. return (NULL);
  262. }
  263. MEXTADD(m, buf, pp->pr_size, M_EXTWR, m_extfree_pool, pp);
  264. return (m);
  265. }
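/*
 * Example: a minimal sketch of how a receive path might use m_clget()
 * to allocate a cluster-backed packet. The 9k length and the function
 * name are illustrative assumptions, not taken from this file.
 */
#if 0
static struct mbuf *
example_rx_alloc(void)
{
	struct mbuf *m;

	/* passing NULL asks m_clget() to allocate the header mbuf too */
	m = m_clget(NULL, M_DONTWAIT, 9 * 1024);
	if (m == NULL)
		return (NULL);		/* no mbuf or no 9k cluster */
	m->m_len = m->m_pkthdr.len = 0;	/* nothing stored yet */
	return (m);
}
#endif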
  266. void
  267. m_extfree_pool(caddr_t buf, u_int size, void *pp)
  268. {
  269. pool_put(pp, buf);
  270. }
  271. struct mbuf *
  272. m_free(struct mbuf *m)
  273. {
  274. struct mbuf *n;
  275. if (m == NULL)
  276. return (NULL);
  277. mtx_enter(&mbstatmtx);
  278. mbstat.m_mtypes[m->m_type]--;
  279. mtx_leave(&mbstatmtx);
  280. n = m->m_next;
  281. if (m->m_flags & M_ZEROIZE) {
  282. m_zero(m);
  283. /* propagate M_ZEROIZE to the next mbuf in the chain */
  284. if (n)
  285. n->m_flags |= M_ZEROIZE;
  286. }
  287. if (m->m_flags & M_PKTHDR)
  288. m_tag_delete_chain(m);
  289. if (m->m_flags & M_EXT)
  290. m_extfree(m);
  291. pool_put(&mbpool, m);
  292. return (n);
  293. }
  294. void
  295. m_extref(struct mbuf *o, struct mbuf *n)
  296. {
  297. int refs = MCLISREFERENCED(o);
  298. n->m_flags |= o->m_flags & (M_EXT|M_EXTWR);
  299. if (refs)
  300. mtx_enter(&m_extref_mtx);
  301. n->m_ext.ext_nextref = o->m_ext.ext_nextref;
  302. n->m_ext.ext_prevref = o;
  303. o->m_ext.ext_nextref = n;
  304. n->m_ext.ext_nextref->m_ext.ext_prevref = n;
  305. if (refs)
  306. mtx_leave(&m_extref_mtx);
  307. MCLREFDEBUGN((n), __FILE__, __LINE__);
  308. }
  309. static inline u_int
  310. m_extunref(struct mbuf *m)
  311. {
  312. int refs = 1;
  313. if (!MCLISREFERENCED(m))
  314. return (0);
  315. mtx_enter(&m_extref_mtx);
  316. if (MCLISREFERENCED(m)) {
  317. m->m_ext.ext_nextref->m_ext.ext_prevref =
  318. m->m_ext.ext_prevref;
  319. m->m_ext.ext_prevref->m_ext.ext_nextref =
  320. m->m_ext.ext_nextref;
  321. } else
  322. refs = 0;
  323. mtx_leave(&m_extref_mtx);
  324. return (refs);
  325. }
  326. void
  327. m_extfree(struct mbuf *m)
  328. {
  329. if (m_extunref(m) == 0) {
  330. (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
  331. m->m_ext.ext_size, m->m_ext.ext_arg);
  332. }
  333. m->m_flags &= ~(M_EXT|M_EXTWR);
  334. }
  335. void
  336. m_freem(struct mbuf *m)
  337. {
  338. while (m != NULL)
  339. m = m_free(m);
  340. }
  341. /*
  342. * mbuf chain defragmenter. This function uses some evil tricks to defragment
  343. * an mbuf chain into a single buffer without changing the mbuf pointer.
  344. * It relies on detailed knowledge of mbuf internals to make this work.
  345. */
  346. int
  347. m_defrag(struct mbuf *m, int how)
  348. {
  349. struct mbuf *m0;
  350. if (m->m_next == NULL)
  351. return (0);
  352. #ifdef DIAGNOSTIC
  353. if (!(m->m_flags & M_PKTHDR))
  354. panic("m_defrag: no packet hdr or not a chain");
  355. #endif
  356. if ((m0 = m_gethdr(how, m->m_type)) == NULL)
  357. return (ENOBUFS);
  358. if (m->m_pkthdr.len > MHLEN) {
  359. MCLGETI(m0, how, NULL, m->m_pkthdr.len);
  360. if (!(m0->m_flags & M_EXT)) {
  361. m_free(m0);
  362. return (ENOBUFS);
  363. }
  364. }
  365. m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
  366. m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
  367. /* free chain behind and possible ext buf on the first mbuf */
  368. m_freem(m->m_next);
  369. m->m_next = NULL;
  370. if (m->m_flags & M_EXT)
  371. m_extfree(m);
  372. /*
  373. * Bounce copy mbuf over to the original mbuf and set everything up.
  374. * This needs to reset or clear all pointers that may go into the
  375. * original mbuf chain.
  376. */
  377. if (m0->m_flags & M_EXT) {
  378. memcpy(&m->m_ext, &m0->m_ext, sizeof(struct mbuf_ext));
  379. MCLINITREFERENCE(m);
  380. m->m_flags |= m0->m_flags & (M_EXT|M_EXTWR);
  381. m->m_data = m->m_ext.ext_buf;
  382. } else {
  383. m->m_data = m->m_pktdat;
  384. memcpy(m->m_data, m0->m_data, m0->m_len);
  385. }
  386. m->m_pkthdr.len = m->m_len = m0->m_len;
  387. m0->m_flags &= ~(M_EXT|M_EXTWR); /* cluster is gone */
  388. m_free(m0);
  389. return (0);
  390. }
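/*
 * Example: a hedged sketch of the usual m_defrag() caller, a transmit
 * path whose hardware cannot handle an arbitrarily fragmented chain;
 * the DMA details are elided and the function name is an assumption.
 */
#if 0
static int
example_tx_flatten(struct mbuf *m)	/* m must have a packet header */
{
	/* on failure the original chain is left intact */
	if (m_defrag(m, M_DONTWAIT) != 0)
		return (ENOBUFS);
	/* m is now a single mbuf: map mtod(m, caddr_t) for m->m_len bytes */
	return (0);
}
#endif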
  391. /*
  392. * Mbuffer utility routines.
  393. */
  394. /*
  395. * Ensure len bytes of contiguous space at the beginning of the mbuf chain.
  396. */
  397. struct mbuf *
  398. m_prepend(struct mbuf *m, int len, int how)
  399. {
  400. struct mbuf *mn;
  401. if (len > MHLEN)
  402. panic("mbuf prepend length too big");
  403. if (M_LEADINGSPACE(m) >= len) {
  404. m->m_data -= len;
  405. m->m_len += len;
  406. } else {
  407. MGET(mn, how, m->m_type);
  408. if (mn == NULL) {
  409. m_freem(m);
  410. return (NULL);
  411. }
  412. if (m->m_flags & M_PKTHDR)
  413. M_MOVE_PKTHDR(mn, m);
  414. mn->m_next = m;
  415. m = mn;
  416. MH_ALIGN(m, len);
  417. m->m_len = len;
  418. }
  419. if (m->m_flags & M_PKTHDR)
  420. m->m_pkthdr.len += len;
  421. return (m);
  422. }
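/*
 * Example: prepending an encapsulation header with m_prepend(). The
 * struct example_hdr type is hypothetical; real callers prepend sizes
 * of at most MHLEN, as the panic above enforces.
 */
#if 0
struct example_hdr { u_int32_t eh_tag; };	/* hypothetical header */

static struct mbuf *
example_encap(struct mbuf *m)
{
	struct example_hdr *eh;

	m = m_prepend(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		return (NULL);		/* m_prepend freed the chain */
	eh = mtod(m, struct example_hdr *);
	eh->eh_tag = 0;			/* fill in the new header */
	return (m);
}
#endif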
  423. /*
  424. * Make a copy of an mbuf chain starting "off" bytes from the beginning,
  425. * continuing for "len" bytes. If len is M_COPYALL, copy to the end of the mbuf chain.
  426. * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
  427. */
  428. struct mbuf *
  429. m_copym(struct mbuf *m, int off, int len, int wait)
  430. {
  431. return m_copym0(m, off, len, wait, 0); /* shallow copy on M_EXT */
  432. }
  433. /*
  434. * m_copym2() is like m_copym(), except it COPIES cluster mbufs, instead
  435. * of merely bumping the reference count.
  436. */
  437. struct mbuf *
  438. m_copym2(struct mbuf *m, int off, int len, int wait)
  439. {
  440. return m_copym0(m, off, len, wait, 1); /* deep copy */
  441. }
  442. struct mbuf *
  443. m_copym0(struct mbuf *m0, int off, int len, int wait, int deep)
  444. {
  445. struct mbuf *m, *n, **np;
  446. struct mbuf *top;
  447. int copyhdr = 0;
  448. if (off < 0 || len < 0)
  449. panic("m_copym0: off %d, len %d", off, len);
  450. if (off == 0 && m0->m_flags & M_PKTHDR)
  451. copyhdr = 1;
  452. if ((m = m_getptr(m0, off, &off)) == NULL)
  453. panic("m_copym0: short mbuf chain");
  454. np = &top;
  455. top = NULL;
  456. while (len > 0) {
  457. if (m == NULL) {
  458. if (len != M_COPYALL)
  459. panic("m_copym0: m == NULL and not COPYALL");
  460. break;
  461. }
  462. MGET(n, wait, m->m_type);
  463. *np = n;
  464. if (n == NULL)
  465. goto nospace;
  466. if (copyhdr) {
  467. if (m_dup_pkthdr(n, m0, wait))
  468. goto nospace;
  469. if (len != M_COPYALL)
  470. n->m_pkthdr.len = len;
  471. copyhdr = 0;
  472. }
  473. n->m_len = min(len, m->m_len - off);
  474. if (m->m_flags & M_EXT) {
  475. if (!deep) {
  476. n->m_data = m->m_data + off;
  477. n->m_ext = m->m_ext;
  478. MCLADDREFERENCE(m, n);
  479. } else {
  480. /*
  481. * We are unsure how m was allocated, so copy the data
  482. * into multiple MCLBYTES cluster mbufs.
  483. */
  484. MCLGET(n, wait);
  485. n->m_len = 0;
  486. n->m_len = M_TRAILINGSPACE(n);
  487. n->m_len = min(n->m_len, len);
  488. n->m_len = min(n->m_len, m->m_len - off);
  489. memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
  490. n->m_len);
  491. }
  492. } else
  493. memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
  494. n->m_len);
  495. if (len != M_COPYALL)
  496. len -= n->m_len;
  497. off += n->m_len;
  498. #ifdef DIAGNOSTIC
  499. if (off > m->m_len)
  500. panic("m_copym0 overrun");
  501. #endif
  502. if (off == m->m_len) {
  503. m = m->m_next;
  504. off = 0;
  505. }
  506. np = &n->m_next;
  507. }
  508. return (top);
  509. nospace:
  510. m_freem(top);
  511. return (NULL);
  512. }
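/*
 * Example: a sketch of the common m_copym() pattern of keeping a
 * reference copy of a packet (e.g. for retransmission). Clusters are
 * shared and reference counted; m_copym2() would be the choice if the
 * copy had to be writable. The wrapper name is an assumption.
 */
#if 0
static struct mbuf *
example_snapshot(struct mbuf *m)
{
	return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}
#endif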
  513. /*
  514. * Copy data from an mbuf chain starting "off" bytes from the beginning,
  515. * continuing for "len" bytes, into the indicated buffer.
  516. */
  517. void
  518. m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
  519. {
  520. unsigned count;
  521. if (off < 0)
  522. panic("m_copydata: off %d < 0", off);
  523. if (len < 0)
  524. panic("m_copydata: len %d < 0", len);
  525. if ((m = m_getptr(m, off, &off)) == NULL)
  526. panic("m_copydata: short mbuf chain");
  527. while (len > 0) {
  528. if (m == NULL)
  529. panic("m_copydata: null mbuf");
  530. count = min(m->m_len - off, len);
  531. memmove(cp, mtod(m, caddr_t) + off, count);
  532. len -= count;
  533. cp += count;
  534. off = 0;
  535. m = m->m_next;
  536. }
  537. }
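/*
 * Example: reading a fixed-size header out of a chain regardless of how
 * the bytes are split across mbufs. Assumes struct ip from
 * <netinet/ip.h>; the function name is hypothetical.
 */
#if 0
static int
example_peek_ip(struct mbuf *m, struct ip *ip)
{
	if (m->m_pkthdr.len < sizeof(*ip))
		return (EINVAL);
	m_copydata(m, 0, sizeof(*ip), (caddr_t)ip);
	return (0);
}
#endif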
  538. /*
  539. * Copy data from a buffer back into the indicated mbuf chain,
  540. * starting "off" bytes from the beginning, extending the mbuf
  541. * chain if necessary. The mbuf needs to be properly initialized
  542. * including the setting of m_len.
  543. */
  544. int
  545. m_copyback(struct mbuf *m0, int off, int len, const void *_cp, int wait)
  546. {
  547. int mlen, totlen = 0;
  548. struct mbuf *m = m0, *n;
  549. caddr_t cp = (caddr_t)_cp;
  550. int error = 0;
  551. if (m0 == NULL)
  552. return (0);
  553. while (off > (mlen = m->m_len)) {
  554. off -= mlen;
  555. totlen += mlen;
  556. if (m->m_next == NULL) {
  557. if ((n = m_get(wait, m->m_type)) == NULL) {
  558. error = ENOBUFS;
  559. goto out;
  560. }
  561. if (off + len > MLEN) {
  562. MCLGETI(n, wait, NULL, off + len);
  563. if (!(n->m_flags & M_EXT)) {
  564. m_free(n);
  565. error = ENOBUFS;
  566. goto out;
  567. }
  568. }
  569. memset(mtod(n, caddr_t), 0, off);
  570. n->m_len = len + off;
  571. m->m_next = n;
  572. }
  573. m = m->m_next;
  574. }
  575. while (len > 0) {
  576. /* extend last packet to be filled fully */
  577. if (m->m_next == NULL && (len > m->m_len - off))
  578. m->m_len += min(len - (m->m_len - off),
  579. M_TRAILINGSPACE(m));
  580. mlen = min(m->m_len - off, len);
  581. memmove(mtod(m, caddr_t) + off, cp, mlen);
  582. cp += mlen;
  583. len -= mlen;
  584. totlen += mlen + off;
  585. if (len == 0)
  586. break;
  587. off = 0;
  588. if (m->m_next == NULL) {
  589. if ((n = m_get(wait, m->m_type)) == NULL) {
  590. error = ENOBUFS;
  591. goto out;
  592. }
  593. if (len > MLEN) {
  594. MCLGETI(n, wait, NULL, len);
  595. if (!(n->m_flags & M_EXT)) {
  596. m_free(n);
  597. error = ENOBUFS;
  598. goto out;
  599. }
  600. }
  601. n->m_len = len;
  602. m->m_next = n;
  603. }
  604. m = m->m_next;
  605. }
  606. out:
  607. if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
  608. m->m_pkthdr.len = totlen;
  609. return (error);
  610. }
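/*
 * Example: patching a 16-bit field at a known offset with m_copyback(),
 * which extends the chain if the offset lies past the current end; the
 * wrapper is a hypothetical sketch.
 */
#if 0
static int
example_patch(struct mbuf *m, int off, u_int16_t val)
{
	return (m_copyback(m, off, sizeof(val), &val, M_DONTWAIT));
}
#endif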
  611. /*
  612. * Concatenate mbuf chain n to m.
  613. * n may be copied into m (when n->m_len is small), so the data portion of
  614. * n can end up in an mbuf of a different type; both chains should
  615. * therefore be of the same type (e.g. MT_DATA).
  616. * Any m_pkthdr is left unchanged.
  617. */
  618. void
  619. m_cat(struct mbuf *m, struct mbuf *n)
  620. {
  621. while (m->m_next)
  622. m = m->m_next;
  623. while (n) {
  624. if (M_READONLY(m) || n->m_len > M_TRAILINGSPACE(m)) {
  625. /* just join the two chains */
  626. m->m_next = n;
  627. return;
  628. }
  629. /* splat the data from one into the other */
  630. memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
  631. n->m_len);
  632. m->m_len += n->m_len;
  633. n = m_free(n);
  634. }
  635. }
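/*
 * Example: since m_cat() leaves m_pkthdr untouched, a caller gluing a
 * continuation chain onto a packet fixes the length itself; a sketch
 * with assumed names.
 */
#if 0
static void
example_append(struct mbuf *m, struct mbuf *extra, int extralen)
{
	m_cat(m, extra);	/* both chains same type, e.g. MT_DATA */
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len += extralen;
}
#endif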
  636. void
  637. m_adj(struct mbuf *mp, int req_len)
  638. {
  639. int len = req_len;
  640. struct mbuf *m;
  641. int count;
  642. if ((m = mp) == NULL)
  643. return;
  644. if (len >= 0) {
  645. /*
  646. * Trim from head.
  647. */
  648. while (m != NULL && len > 0) {
  649. if (m->m_len <= len) {
  650. len -= m->m_len;
  651. m->m_len = 0;
  652. m = m->m_next;
  653. } else {
  654. m->m_len -= len;
  655. m->m_data += len;
  656. len = 0;
  657. }
  658. }
  659. if (mp->m_flags & M_PKTHDR)
  660. mp->m_pkthdr.len -= (req_len - len);
  661. } else {
  662. /*
  663. * Trim from tail. Scan the mbuf chain,
  664. * calculating its length and finding the last mbuf.
  665. * If the adjustment only affects this mbuf, then just
  666. * adjust and return. Otherwise, rescan and truncate
  667. * after the remaining size.
  668. */
  669. len = -len;
  670. count = 0;
  671. for (;;) {
  672. count += m->m_len;
  673. if (m->m_next == NULL)
  674. break;
  675. m = m->m_next;
  676. }
  677. if (m->m_len >= len) {
  678. m->m_len -= len;
  679. if (mp->m_flags & M_PKTHDR)
  680. mp->m_pkthdr.len -= len;
  681. return;
  682. }
  683. count -= len;
  684. if (count < 0)
  685. count = 0;
  686. /*
  687. * Correct length for chain is "count".
  688. * Find the mbuf with last data, adjust its length,
  689. * and toss data from remaining mbufs on chain.
  690. */
  691. m = mp;
  692. if (m->m_flags & M_PKTHDR)
  693. m->m_pkthdr.len = count;
  694. for (; m; m = m->m_next) {
  695. if (m->m_len >= count) {
  696. m->m_len = count;
  697. break;
  698. }
  699. count -= m->m_len;
  700. }
  701. while ((m = m->m_next) != NULL)
  702. m->m_len = 0;
  703. }
  704. }
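/*
 * Example: the two directions of m_adj(), trimming an assumed header
 * from the front and an ethernet CRC from the back; ETHER_CRC_LEN is
 * assumed from <netinet/if_ether.h>.
 */
#if 0
static void
example_trim(struct mbuf *m, int hdrlen)
{
	m_adj(m, hdrlen);		/* positive: trim from the head */
	m_adj(m, -ETHER_CRC_LEN);	/* negative: trim from the tail */
}
#endif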
  705. /*
  706. * Rearrange an mbuf chain so that len bytes are contiguous
  707. * and in the data area of an mbuf (so that mtod will work
  708. * for a structure of size len). Returns the resulting
  709. * mbuf chain on success; on failure, frees the chain and returns NULL.
  710. */
  711. struct mbuf *
  712. m_pullup(struct mbuf *n, int len)
  713. {
  714. struct mbuf *m;
  715. int count;
  716. /*
  717. * If first mbuf has no cluster, and has room for len bytes
  718. * without shifting current data, pullup into it,
  719. * otherwise allocate a new mbuf to prepend to the chain.
  720. */
  721. if ((n->m_flags & M_EXT) == 0 && n->m_next &&
  722. n->m_data + len < &n->m_dat[MLEN]) {
  723. if (n->m_len >= len)
  724. return (n);
  725. m = n;
  726. n = n->m_next;
  727. len -= m->m_len;
  728. } else if ((n->m_flags & M_EXT) != 0 && len > MHLEN && n->m_next &&
  729. n->m_data + len < &n->m_ext.ext_buf[n->m_ext.ext_size]) {
  730. if (n->m_len >= len)
  731. return (n);
  732. m = n;
  733. n = n->m_next;
  734. len -= m->m_len;
  735. } else {
  736. if (len > MAXMCLBYTES)
  737. goto bad;
  738. MGET(m, M_DONTWAIT, n->m_type);
  739. if (m == NULL)
  740. goto bad;
  741. if (len > MHLEN) {
  742. MCLGETI(m, M_DONTWAIT, NULL, len);
  743. if ((m->m_flags & M_EXT) == 0) {
  744. m_free(m);
  745. goto bad;
  746. }
  747. }
  748. m->m_len = 0;
  749. if (n->m_flags & M_PKTHDR)
  750. M_MOVE_PKTHDR(m, n);
  751. }
  752. do {
  753. count = min(len, n->m_len);
  754. memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
  755. count);
  756. len -= count;
  757. m->m_len += count;
  758. n->m_len -= count;
  759. if (n->m_len)
  760. n->m_data += count;
  761. else
  762. n = m_free(n);
  763. } while (len > 0 && n);
  764. if (len > 0) {
  765. (void)m_free(m);
  766. goto bad;
  767. }
  768. m->m_next = n;
  769. return (m);
  770. bad:
  771. m_freem(n);
  772. return (NULL);
  773. }
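/*
 * Example: the canonical m_pullup() idiom before casting m_data to a
 * header structure; assumes struct ip from <netinet/ip.h> and a
 * hypothetical input routine.
 */
#if 0
static struct mbuf *
example_input(struct mbuf *m)
{
	struct ip *ip;

	if (m->m_len < sizeof(*ip) &&
	    (m = m_pullup(m, sizeof(*ip))) == NULL)
		return (NULL);		/* chain was freed on failure */
	ip = mtod(m, struct ip *);
	/* ... inspect ip ... */
	return (m);
}
#endif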
  774. /*
  775. * Return a pointer to mbuf/offset of location in mbuf chain.
  776. */
  777. struct mbuf *
  778. m_getptr(struct mbuf *m, int loc, int *off)
  779. {
  780. while (loc >= 0) {
  781. /* Normal end of search */
  782. if (m->m_len > loc) {
  783. *off = loc;
  784. return (m);
  785. } else {
  786. loc -= m->m_len;
  787. if (m->m_next == NULL) {
  788. if (loc == 0) {
  789. /* Point at the end of valid data */
  790. *off = m->m_len;
  791. return (m);
  792. } else {
  793. return (NULL);
  794. }
  795. } else {
  796. m = m->m_next;
  797. }
  798. }
  799. }
  800. return (NULL);
  801. }
  802. /*
  803. * Inject a new mbuf chain of length siz into mbuf chain m0 at
  804. * position len0. Returns a pointer to the first injected mbuf, or
  805. * NULL on failure (m0 is left undisturbed). Note that if there is
  806. * enough space for an object of size siz in the appropriate position,
  807. * no memory will be allocated. Also, there will be no data movement in
  808. * the first len0 bytes (pointers into that region remain valid).
  809. *
  810. * XXX It is assumed that siz is less than the size of an mbuf at the moment.
  811. */
  812. struct mbuf *
  813. m_inject(struct mbuf *m0, int len0, int siz, int wait)
  814. {
  815. struct mbuf *m, *n, *n2 = NULL, *n3;
  816. unsigned len = len0, remain;
  817. if ((siz >= MHLEN) || (len0 <= 0))
  818. return (NULL);
  819. for (m = m0; m && len > m->m_len; m = m->m_next)
  820. len -= m->m_len;
  821. if (m == NULL)
  822. return (NULL);
  823. remain = m->m_len - len;
  824. if (remain == 0) {
  825. if ((m->m_next) && (M_LEADINGSPACE(m->m_next) >= siz)) {
  826. m->m_next->m_len += siz;
  827. if (m0->m_flags & M_PKTHDR)
  828. m0->m_pkthdr.len += siz;
  829. m->m_next->m_data -= siz;
  830. return m->m_next;
  831. }
  832. } else {
  833. n2 = m_copym2(m, len, remain, wait);
  834. if (n2 == NULL)
  835. return (NULL);
  836. }
  837. MGET(n, wait, MT_DATA);
  838. if (n == NULL) {
  839. if (n2)
  840. m_freem(n2);
  841. return (NULL);
  842. }
  843. n->m_len = siz;
  844. if (m0->m_flags & M_PKTHDR)
  845. m0->m_pkthdr.len += siz;
  846. m->m_len -= remain; /* Trim */
  847. if (n2) {
  848. for (n3 = n; n3->m_next != NULL; n3 = n3->m_next)
  849. ;
  850. n3->m_next = n2;
  851. } else
  852. n3 = n;
  853. for (; n3->m_next != NULL; n3 = n3->m_next)
  854. ;
  855. n3->m_next = m->m_next;
  856. m->m_next = n;
  857. return n;
  858. }
  859. /*
  860. * Partition an mbuf chain in two pieces, returning the tail --
  861. * all but the first len0 bytes. In case of failure, it returns NULL and
  862. * attempts to restore the chain to its original state.
  863. */
  864. struct mbuf *
  865. m_split(struct mbuf *m0, int len0, int wait)
  866. {
  867. struct mbuf *m, *n;
  868. unsigned len = len0, remain, olen;
  869. for (m = m0; m && len > m->m_len; m = m->m_next)
  870. len -= m->m_len;
  871. if (m == NULL)
  872. return (NULL);
  873. remain = m->m_len - len;
  874. if (m0->m_flags & M_PKTHDR) {
  875. MGETHDR(n, wait, m0->m_type);
  876. if (n == NULL)
  877. return (NULL);
  878. if (m_dup_pkthdr(n, m0, wait)) {
  879. m_freem(n);
  880. return (NULL);
  881. }
  882. n->m_pkthdr.len -= len0;
  883. olen = m0->m_pkthdr.len;
  884. m0->m_pkthdr.len = len0;
  885. if (m->m_flags & M_EXT)
  886. goto extpacket;
  887. if (remain > MHLEN) {
  888. /* m can't be the lead packet */
  889. MH_ALIGN(n, 0);
  890. n->m_next = m_split(m, len, wait);
  891. if (n->m_next == NULL) {
  892. (void) m_free(n);
  893. m0->m_pkthdr.len = olen;
  894. return (NULL);
  895. } else
  896. return (n);
  897. } else
  898. MH_ALIGN(n, remain);
  899. } else if (remain == 0) {
  900. n = m->m_next;
  901. m->m_next = NULL;
  902. return (n);
  903. } else {
  904. MGET(n, wait, m->m_type);
  905. if (n == NULL)
  906. return (NULL);
  907. M_ALIGN(n, remain);
  908. }
  909. extpacket:
  910. if (m->m_flags & M_EXT) {
  911. n->m_ext = m->m_ext;
  912. MCLADDREFERENCE(m, n);
  913. n->m_data = m->m_data + len;
  914. } else {
  915. memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
  916. }
  917. n->m_len = remain;
  918. m->m_len = len;
  919. n->m_next = m->m_next;
  920. m->m_next = NULL;
  921. return (n);
  922. }
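/*
 * Example: splitting an oversized packet at a boundary, as a
 * fragmentation path might; the first piece keeps the packet header.
 * A sketch with assumed names.
 */
#if 0
static struct mbuf *
example_fragment(struct mbuf *m, int boundary)
{
	struct mbuf *tail;

	tail = m_split(m, boundary, M_DONTWAIT);
	if (tail == NULL)
		return (NULL);		/* m was restored on failure */
	return (tail);		/* all data past the first boundary bytes */
}
#endif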
  923. /*
  924. * Routine to copy from device local memory into mbufs.
  925. */
  926. struct mbuf *
  927. m_devget(char *buf, int totlen, int off)
  928. {
  929. struct mbuf *m;
  930. struct mbuf *top, **mp;
  931. int len;
  932. top = NULL;
  933. mp = &top;
  934. if (off < 0 || off > MHLEN)
  935. return (NULL);
  936. MGETHDR(m, M_DONTWAIT, MT_DATA);
  937. if (m == NULL)
  938. return (NULL);
  939. m->m_pkthdr.len = totlen;
  940. len = MHLEN;
  941. while (totlen > 0) {
  942. if (top != NULL) {
  943. MGET(m, M_DONTWAIT, MT_DATA);
  944. if (m == NULL) {
  945. /*
  946. * As we might get called by pfkey, make sure
  947. * we do not leak sensitive data.
  948. */
  949. top->m_flags |= M_ZEROIZE;
  950. m_freem(top);
  951. return (NULL);
  952. }
  953. len = MLEN;
  954. }
  955. if (totlen + off >= MINCLSIZE) {
  956. MCLGET(m, M_DONTWAIT);
  957. if (m->m_flags & M_EXT)
  958. len = MCLBYTES;
  959. } else {
  960. /* Place initial small packet/header at end of mbuf. */
  961. if (top == NULL && totlen + off + max_linkhdr <= len) {
  962. m->m_data += max_linkhdr;
  963. len -= max_linkhdr;
  964. }
  965. }
  966. if (off) {
  967. m->m_data += off;
  968. len -= off;
  969. off = 0;
  970. }
  971. m->m_len = len = min(totlen, len);
  972. memcpy(mtod(m, void *), buf, (size_t)len);
  973. buf += len;
  974. *mp = m;
  975. mp = &m->m_next;
  976. totlen -= len;
  977. }
  978. return (top);
  979. }
  980. void
  981. m_zero(struct mbuf *m)
  982. {
  983. #ifdef DIAGNOSTIC
  984. if (M_READONLY(m))
  985. panic("m_zero: M_READONLY");
  986. #endif /* DIAGNOSTIC */
  987. if (m->m_flags & M_EXT)
  988. explicit_bzero(m->m_ext.ext_buf, m->m_ext.ext_size);
  989. else {
  990. if (m->m_flags & M_PKTHDR)
  991. explicit_bzero(m->m_pktdat, MHLEN);
  992. else
  993. explicit_bzero(m->m_dat, MLEN);
  994. }
  995. }
  996. /*
  997. * Apply function f to the data in an mbuf chain starting "off" bytes from the
  998. * beginning, continuing for "len" bytes.
  999. */
  1000. int
  1001. m_apply(struct mbuf *m, int off, int len,
  1002. int (*f)(caddr_t, caddr_t, unsigned int), caddr_t fstate)
  1003. {
  1004. int rval;
  1005. unsigned int count;
  1006. if (len < 0)
  1007. panic("m_apply: len %d < 0", len);
  1008. if (off < 0)
  1009. panic("m_apply: off %d < 0", off);
  1010. while (off > 0) {
  1011. if (m == NULL)
  1012. panic("m_apply: null mbuf in skip");
  1013. if (off < m->m_len)
  1014. break;
  1015. off -= m->m_len;
  1016. m = m->m_next;
  1017. }
  1018. while (len > 0) {
  1019. if (m == NULL)
  1020. panic("m_apply: null mbuf");
  1021. count = min(m->m_len - off, len);
  1022. rval = f(fstate, mtod(m, caddr_t) + off, count);
  1023. if (rval)
  1024. return (rval);
  1025. len -= count;
  1026. off = 0;
  1027. m = m->m_next;
  1028. }
  1029. return (0);
  1030. }
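/*
 * Example: m_apply() visits the data in place, without linearizing the
 * chain; a byte sum stands in here for a real checksum or digest
 * update. Both function names are assumptions.
 */
#if 0
static int
example_sum1(caddr_t state, caddr_t data, unsigned int len)
{
	u_int32_t *sum = (u_int32_t *)state;
	unsigned int i;

	for (i = 0; i < len; i++)
		*sum += (u_char)data[i];
	return (0);			/* nonzero would abort the walk */
}

static u_int32_t
example_sum(struct mbuf *m)
{
	u_int32_t sum = 0;

	m_apply(m, 0, m->m_pkthdr.len, example_sum1, (caddr_t)&sum);
	return (sum);
}
#endif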
  1031. int
  1032. m_leadingspace(struct mbuf *m)
  1033. {
  1034. if (M_READONLY(m))
  1035. return 0;
  1036. return (m->m_flags & M_EXT ? m->m_data - m->m_ext.ext_buf :
  1037. m->m_flags & M_PKTHDR ? m->m_data - m->m_pktdat :
  1038. m->m_data - m->m_dat);
  1039. }
  1040. int
  1041. m_trailingspace(struct mbuf *m)
  1042. {
  1043. if (M_READONLY(m))
  1044. return 0;
  1045. return (m->m_flags & M_EXT ? m->m_ext.ext_buf +
  1046. m->m_ext.ext_size - (m->m_data + m->m_len) :
  1047. &m->m_dat[MLEN] - (m->m_data + m->m_len));
  1048. }
  1049. /*
  1050. * Duplicate the mbuf pkthdr from `from' to `to'.
  1051. * `from' must have M_PKTHDR set, and `to' must be empty.
  1052. */
  1053. int
  1054. m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int wait)
  1055. {
  1056. int error;
  1057. KASSERT(from->m_flags & M_PKTHDR);
  1058. to->m_flags = (to->m_flags & (M_EXT | M_EXTWR));
  1059. to->m_flags |= (from->m_flags & M_COPYFLAGS);
  1060. to->m_pkthdr = from->m_pkthdr;
  1061. SLIST_INIT(&to->m_pkthdr.tags);
  1062. if ((error = m_tag_copy_chain(to, from, wait)) != 0)
  1063. return (error);
  1064. if ((to->m_flags & M_EXT) == 0)
  1065. to->m_data = to->m_pktdat;
  1066. return (0);
  1067. }
  1068. #ifdef DDB
  1069. void
  1070. m_print(void *v,
  1071. int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
  1072. {
  1073. struct mbuf *m = v;
  1074. (*pr)("mbuf %p\n", m);
  1075. (*pr)("m_type: %i\tm_flags: %b\n", m->m_type, m->m_flags, M_BITS);
  1076. (*pr)("m_next: %p\tm_nextpkt: %p\n", m->m_next, m->m_nextpkt);
  1077. (*pr)("m_data: %p\tm_len: %u\n", m->m_data, m->m_len);
  1078. (*pr)("m_dat: %p\tm_pktdat: %p\n", m->m_dat, m->m_pktdat);
  1079. if (m->m_flags & M_PKTHDR) {
  1080. (*pr)("m_ptkhdr.ph_ifidx: %u\tm_pkthdr.len: %i\n",
  1081. m->m_pkthdr.ph_ifidx, m->m_pkthdr.len);
  1082. (*pr)("m_ptkhdr.tags: %p\tm_pkthdr.tagsset: %b\n",
  1083. SLIST_FIRST(&m->m_pkthdr.tags),
  1084. m->m_pkthdr.tagsset, MTAG_BITS);
  1085. (*pr)("m_pkthdr.csum_flags: %b\n",
  1086. m->m_pkthdr.csum_flags, MCS_BITS);
  1087. (*pr)("m_pkthdr.ether_vtag: %u\tm_ptkhdr.ph_rtableid: %u\n",
  1088. m->m_pkthdr.ether_vtag, m->m_pkthdr.ph_rtableid);
  1089. (*pr)("m_pkthdr.pf.statekey: %p\tm_pkthdr.pf.inp %p\n",
  1090. m->m_pkthdr.pf.statekey, m->m_pkthdr.pf.inp);
  1091. (*pr)("m_pkthdr.pf.qid: %u\tm_pkthdr.pf.tag: %u\n",
  1092. m->m_pkthdr.pf.qid, m->m_pkthdr.pf.tag);
  1093. (*pr)("m_pkthdr.pf.flags: %b\n",
  1094. m->m_pkthdr.pf.flags, MPF_BITS);
  1095. (*pr)("m_pkthdr.pf.routed: %u\tm_pkthdr.pf.prio: %u\n",
  1096. m->m_pkthdr.pf.routed, m->m_pkthdr.pf.prio);
  1097. }
  1098. if (m->m_flags & M_EXT) {
  1099. (*pr)("m_ext.ext_buf: %p\tm_ext.ext_size: %u\n",
  1100. m->m_ext.ext_buf, m->m_ext.ext_size);
  1101. (*pr)("m_ext.ext_free: %p\tm_ext.ext_arg: %p\n",
  1102. m->m_ext.ext_free, m->m_ext.ext_arg);
  1103. (*pr)("m_ext.ext_nextref: %p\tm_ext.ext_prevref: %p\n",
  1104. m->m_ext.ext_nextref, m->m_ext.ext_prevref);
  1105. }
  1106. }
  1107. #endif
  1108. /*
  1109. * mbuf lists
  1110. */
  1111. void ml_join(struct mbuf_list *, struct mbuf_list *);
  1112. void
  1113. ml_init(struct mbuf_list *ml)
  1114. {
  1115. ml->ml_head = ml->ml_tail = NULL;
  1116. ml->ml_len = 0;
  1117. }
  1118. void
  1119. ml_enqueue(struct mbuf_list *ml, struct mbuf *m)
  1120. {
  1121. if (ml->ml_tail == NULL)
  1122. ml->ml_head = ml->ml_tail = m;
  1123. else {
  1124. ml->ml_tail->m_nextpkt = m;
  1125. ml->ml_tail = m;
  1126. }
  1127. m->m_nextpkt = NULL;
  1128. ml->ml_len++;
  1129. }
  1130. void
  1131. ml_join(struct mbuf_list *mla, struct mbuf_list *mlb)
  1132. {
  1133. if (!ml_empty(mlb)) {
  1134. if (ml_empty(mla))
  1135. mla->ml_head = mlb->ml_head;
  1136. else
  1137. mla->ml_tail->m_nextpkt = mlb->ml_head;
  1138. mla->ml_tail = mlb->ml_tail;
  1139. mla->ml_len += mlb->ml_len;
  1140. ml_init(mlb);
  1141. }
  1142. }
  1143. struct mbuf *
  1144. ml_dequeue(struct mbuf_list *ml)
  1145. {
  1146. struct mbuf *m;
  1147. m = ml->ml_head;
  1148. if (m != NULL) {
  1149. ml->ml_head = m->m_nextpkt;
  1150. if (ml->ml_head == NULL)
  1151. ml->ml_tail = NULL;
  1152. m->m_nextpkt = NULL;
  1153. ml->ml_len--;
  1154. }
  1155. return (m);
  1156. }
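/*
 * Example: mbuf_list is an unlocked batching structure; a typical
 * consumer dequeues in a loop. The freeing below is a stand-in for
 * real per-packet work, and the function name is an assumption.
 */
#if 0
static void
example_drain(struct mbuf_list *ml)
{
	struct mbuf *m;

	while ((m = ml_dequeue(ml)) != NULL)
		m_freem(m);
}
#endif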
  1157. struct mbuf *
  1158. ml_dechain(struct mbuf_list *ml)
  1159. {
  1160. struct mbuf *m0;
  1161. m0 = ml->ml_head;
  1162. ml_init(ml);
  1163. return (m0);
  1164. }
  1165. struct mbuf *
  1166. ml_filter(struct mbuf_list *ml,
  1167. int (*filter)(void *, const struct mbuf *), void *ctx)
  1168. {
  1169. struct mbuf_list matches = MBUF_LIST_INITIALIZER();
  1170. struct mbuf *m, *n;
  1171. struct mbuf **mp;
  1172. mp = &ml->ml_head;
  1173. for (m = ml->ml_head; m != NULL; m = n) {
  1174. n = m->m_nextpkt;
  1175. if ((*filter)(ctx, m)) {
  1176. *mp = n;
  1177. ml_enqueue(&matches, m);
  1178. } else {
  1179. mp = &m->m_nextpkt;
  1180. ml->ml_tail = m;
  1181. }
  1182. }
  1183. /* fixup ml */
  1184. if (ml->ml_head == NULL)
  1185. ml->ml_tail = NULL;
  1186. ml->ml_len -= ml_len(&matches);
  1187. return (matches.ml_head);	/* like ml_dechain(&matches) */
  1188. }
  1189. /*
  1190. * mbuf queues
  1191. */
  1192. void
  1193. mq_init(struct mbuf_queue *mq, u_int maxlen, int ipl)
  1194. {
  1195. mtx_init(&mq->mq_mtx, ipl);
  1196. ml_init(&mq->mq_list);
  1197. mq->mq_maxlen = maxlen;
  1198. }
  1199. int
  1200. mq_enqueue(struct mbuf_queue *mq, struct mbuf *m)
  1201. {
  1202. int dropped = 0;
  1203. mtx_enter(&mq->mq_mtx);
  1204. if (mq_len(mq) < mq->mq_maxlen)
  1205. ml_enqueue(&mq->mq_list, m);
  1206. else {
  1207. mq->mq_drops++;
  1208. dropped = 1;
  1209. }
  1210. mtx_leave(&mq->mq_mtx);
  1211. if (dropped)
  1212. m_freem(m);
  1213. return (dropped);
  1214. }
  1215. struct mbuf *
  1216. mq_dequeue(struct mbuf_queue *mq)
  1217. {
  1218. struct mbuf *m;
  1219. mtx_enter(&mq->mq_mtx);
  1220. m = ml_dequeue(&mq->mq_list);
  1221. mtx_leave(&mq->mq_mtx);
  1222. return (m);
  1223. }
  1224. int
  1225. mq_enlist(struct mbuf_queue *mq, struct mbuf_list *ml)
  1226. {
  1227. struct mbuf *m;
  1228. int dropped = 0;
  1229. mtx_enter(&mq->mq_mtx);
  1230. if (mq_len(mq) < mq->mq_maxlen)
  1231. ml_join(&mq->mq_list, ml);
  1232. else {
  1233. dropped = ml_len(ml);
  1234. mq->mq_drops += dropped;
  1235. }
  1236. mtx_leave(&mq->mq_mtx);
  1237. if (dropped) {
  1238. while ((m = ml_dequeue(ml)) != NULL)
  1239. m_freem(m);
  1240. }
  1241. return (dropped);
  1242. }
  1243. void
  1244. mq_delist(struct mbuf_queue *mq, struct mbuf_list *ml)
  1245. {
  1246. mtx_enter(&mq->mq_mtx);
  1247. *ml = mq->mq_list;
  1248. ml_init(&mq->mq_list);
  1249. mtx_leave(&mq->mq_mtx);
  1250. }
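/*
 * Example: an mbuf_queue pairing an interrupt-side producer with a
 * thread-side consumer; mq_delist() moves the whole batch out under a
 * single mutex acquisition. The queue and names are assumptions, and
 * freeing stands in for real processing.
 */
#if 0
struct mbuf_queue example_mq;	/* mq_init(&example_mq, IFQ_MAXLEN, IPL_NET) */

static void
example_consumer(void)
{
	struct mbuf_list ml;
	struct mbuf *m;

	mq_delist(&example_mq, &ml);
	while ((m = ml_dequeue(&ml)) != NULL)
		m_freem(m);
}
#endif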
  1251. struct mbuf *
  1252. mq_dechain(struct mbuf_queue *mq)
  1253. {
  1254. struct mbuf *m0;
  1255. mtx_enter(&mq->mq_mtx);
  1256. m0 = ml_dechain(&mq->mq_list);
  1257. mtx_leave(&mq->mq_mtx);
  1258. return (m0);
  1259. }
  1260. struct mbuf *
  1261. mq_filter(struct mbuf_queue *mq,
  1262. int (*filter)(void *, const struct mbuf *), void *ctx)
  1263. {
  1264. struct mbuf *m0;
  1265. mtx_enter(&mq->mq_mtx);
  1266. m0 = ml_filter(&mq->mq_list, filter, ctx);
  1267. mtx_leave(&mq->mq_mtx);
  1268. return (m0);
  1269. }