/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Seccuris Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract to
 * Seccuris Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into the kernel address space using sf_bufs and used directly
 * by BPF. Memory is wired since page faults cannot be tolerated in the
 * contexts where the buffers are copied to (locks held, interrupt context,
 * etc). Access to shared memory buffers is synchronized using a header on
 * each buffer, allowing the number of system calls to go to zero as BPF
 * reaches saturation (buffers filled as fast as they can be drained by the
 * user process). Full details of the protocol for communicating between the
 * user process and BPF may be found in bpf(4).
 */
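
/*
 * Illustrative (non-normative) sketch of the user-side half of that
 * protocol, with "buf" pointing at the start of one mapped buffer and
 * consume() standing in for application processing; the memory barriers a
 * real consumer would pair with the kernel's acquire/release operations are
 * omitted, and bpf(4) remains the authoritative reference:
 *
 *      struct bpf_zbuf_header *bzh = (struct bpf_zbuf_header *)buf;
 *
 *      if (bzh->bzh_kernel_gen != bzh->bzh_user_gen) {
 *              consume((char *)buf + sizeof(*bzh), bzh->bzh_kernel_len);
 *              bzh->bzh_user_gen = bzh->bzh_kernel_gen;
 *      }
 *
 * Advancing bzh_user_gen acknowledges the buffer so the kernel may reclaim
 * and reuse it.
 */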

/*
 * Maximum number of pages per buffer. Since all BPF devices use two, the
 * maximum per device is 2*BPF_MAX_PAGES. Resource limits on the number of
 * sf_bufs may be an issue, so do not set this too high. On older systems,
 * kernel address space limits may also be an issue.
 */
#define BPF_MAX_PAGES   512

/*
 * struct zbuf describes a memory buffer loaned by a user process to the
 * kernel. We represent this as a series of pages managed using an array of
 * sf_bufs. Even though the memory is contiguous in user space, it may not
 * be mapped contiguously in the kernel (i.e., a set of physically
 * non-contiguous pages in the direct map region) so we must implement
 * scatter-gather copying. One significant mitigating factor is that on
 * systems with a direct memory map, we can avoid TLB misses.
 *
 * At the front of the shared memory region is a bpf_zbuf_header, which
 * contains shared control data to allow user space and the kernel to
 * synchronize; this is included in zb_size, but not bpf_bufsize, so that BPF
 * knows that the space is not available.
 */
struct zbuf {
        vm_offset_t              zb_uaddr;      /* User address at time of setup. */
        size_t                   zb_size;       /* Size of buffer, incl. header. */
        u_int                    zb_numpages;   /* Number of pages. */
        int                      zb_flags;      /* Flags on zbuf. */
        struct sf_buf           **zb_pages;     /* Pages themselves. */
        struct bpf_zbuf_header  *zb_header;     /* Shared header. */
};
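
/*
 * For reference, the layout of each loaned buffer as both sides see it
 * (zb_size includes the header; the BPF layer's bd_bufsize, set in
 * bpf_zerocopy_ioctl_setzbuf(), does not):
 *
 *      +--------------------------+--------------------------------------+
 *      | struct bpf_zbuf_header   | packet storage (zb_size - header)    |
 *      +--------------------------+--------------------------------------+
 */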

/*
 * When a buffer has been assigned to userspace, flag it as such, as the
 * buffer may remain in the store position as a result of the user process
 * not yet having acknowledged the buffer in the hold position.
 */
#define ZBUF_FLAG_ASSIGNED      0x00000001      /* Set when owned by user. */

/*
 * Release a page we've previously wired.
 */
static void
zbuf_page_free(vm_page_t pp)
{

        vm_page_unwire(pp, PQ_INACTIVE);
}

/*
 * Free an sf_buf with attached page.
 */
static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
        vm_page_t pp;

        pp = sf_buf_page(sf);
        sf_buf_free(sf);
        zbuf_page_free(pp);
}

/*
 * Free a zbuf, including its page array, sf_bufs, and pages. Allow
 * partially allocated zbufs to be freed so that this may be used even
 * during a zbuf setup.
 */
static void
zbuf_free(struct zbuf *zb)
{
        int i;

        for (i = 0; i < zb->zb_numpages; i++) {
                if (zb->zb_pages[i] != NULL)
                        zbuf_sfbuf_free(zb->zb_pages[i]);
        }
        free(zb->zb_pages, M_BPF);
        free(zb, M_BPF);
}

/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page. Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
        struct sf_buf *sf;
        vm_page_t pp;

        if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE, VM_PROT_READ |
            VM_PROT_WRITE, &pp, 1) < 0)
                return (NULL);
        sf = sf_buf_alloc(pp, SFB_NOWAIT);
        if (sf == NULL) {
                zbuf_page_free(pp);
                return (NULL);
        }
        return (sf);
}

/*
 * Create a zbuf describing a range of user address space memory. Validate
 * page alignment, size requirements, etc.
 */
static int
zbuf_setup(struct thread *td, vm_offset_t uaddr, size_t len,
    struct zbuf **zbp)
{
        struct zbuf *zb;
        struct vm_map *map;
        int error, i;

        *zbp = NULL;

        /*
         * User address must be page-aligned.
         */
        if (uaddr & PAGE_MASK)
                return (EINVAL);

        /*
         * Length must be an integer number of full pages.
         */
        if (len & PAGE_MASK)
                return (EINVAL);

        /*
         * Length must not exceed per-buffer resource limit.
         */
        if ((len / PAGE_SIZE) > BPF_MAX_PAGES)
                return (EINVAL);

        /*
         * Allocate the buffer and set up each page with its own sf_buf.
         */
        error = 0;
        zb = malloc(sizeof(*zb), M_BPF, M_ZERO | M_WAITOK);
        zb->zb_uaddr = uaddr;
        zb->zb_size = len;
        zb->zb_numpages = len / PAGE_SIZE;
        zb->zb_pages = malloc(sizeof(struct sf_buf *) *
            zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
        map = &td->td_proc->p_vmspace->vm_map;
        for (i = 0; i < zb->zb_numpages; i++) {
                zb->zb_pages[i] = zbuf_sfbuf_get(map,
                    uaddr + (i * PAGE_SIZE));
                if (zb->zb_pages[i] == NULL) {
                        error = EFAULT;
                        goto error;
                }
        }
        zb->zb_header =
            (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
        bzero(zb->zb_header, sizeof(*zb->zb_header));
        *zbp = zb;
        return (0);

error:
        zbuf_free(zb);
        return (error);
}

/*
 * Copy bytes from a source into the specified zbuf. The caller is
 * responsible for performing bounds checking, etc.
 */
void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
        u_int count, page, poffset;
        u_char *src_bytes;
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_append_bytes: not in zbuf mode"));
        KASSERT(buf != NULL, ("bpf_zerocopy_append_bytes: NULL buf"));

        src_bytes = (u_char *)src;
        zb = (struct zbuf *)buf;

        KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
            ("bpf_zerocopy_append_bytes: ZBUF_FLAG_ASSIGNED"));

        /*
         * Scatter-gather copy to user pages mapped into kernel address space
         * using sf_bufs: copy up to a page at a time.
         */
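        /*
         * For example, assuming 4KB pages and a hypothetical 32-byte
         * bpf_zbuf_header: a logical offset of 4080 lands at page
         * (4080 + 32) / 4096 = 1, byte (4080 + 32) % 4096 = 16 within that
         * page.
         */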
        offset += sizeof(struct bpf_zbuf_header);
        page = offset / PAGE_SIZE;
        poffset = offset % PAGE_SIZE;
        while (len > 0) {
                KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
                    " page overflow (%d p %d np)\n", page, zb->zb_numpages));

                count = min(len, PAGE_SIZE - poffset);
                bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
                    poffset, count);
                poffset += count;
                if (poffset == PAGE_SIZE) {
                        poffset = 0;
                        page++;
                }
                KASSERT(poffset < PAGE_SIZE,
                    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
                    poffset));
                len -= count;
                src_bytes += count;
        }
}

/*
 * Copy bytes from an mbuf chain to the specified zbuf: copying will be
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * to pages, which may not be contiguously mapped in kernel address space.
 * As with bpf_zerocopy_append_bytes(), the caller is responsible for
 * checking that this will not exceed the buffer limit.
 */
void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
        u_int count, moffset, page, poffset;
        const struct mbuf *m;
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_append_mbuf not in zbuf mode"));
        KASSERT(buf != NULL, ("bpf_zerocopy_append_mbuf: NULL buf"));

        m = (struct mbuf *)src;
        zb = (struct zbuf *)buf;

        KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
            ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_ASSIGNED"));

        /*
         * Scatter gather both from an mbuf chain and to a user page set
         * mapped into kernel address space using sf_bufs. If we're lucky,
         * each mbuf requires one copy operation, but if page alignment and
         * mbuf alignment work out less well, we'll be doing two copies per
         * mbuf.
         */
        offset += sizeof(struct bpf_zbuf_header);
        page = offset / PAGE_SIZE;
        poffset = offset % PAGE_SIZE;
        moffset = 0;
        while (len > 0) {
                KASSERT(page < zb->zb_numpages,
                    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
                    "np)\n", page, zb->zb_numpages));
                KASSERT(m != NULL,
                    ("bpf_zerocopy_append_mbuf: end of mbuf chain"));

                count = min(m->m_len - moffset, len);
                count = min(count, PAGE_SIZE - poffset);
                bcopy(mtod(m, u_char *) + moffset,
                    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
                    count);
                poffset += count;
                if (poffset == PAGE_SIZE) {
                        poffset = 0;
                        page++;
                }
                KASSERT(poffset < PAGE_SIZE,
                    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
                    poffset));
                moffset += count;
                if (moffset == m->m_len) {
                        m = m->m_next;
                        moffset = 0;
                }
                len -= count;
        }
}

/*
 * Notification from the BPF framework that a buffer in the store position is
 * rejecting packets and may be considered full. We mark the buffer as
 * immutable and assign to userspace so that it is immediately available for
 * the user process to access.
 */
void
bpf_zerocopy_buffull(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_buffull: not in zbuf mode"));

        zb = (struct zbuf *)d->bd_sbuf;
        KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));

        if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
                zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
                zb->zb_header->bzh_kernel_len = d->bd_slen;
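                /*
                 * Bump the kernel generation number with release semantics
                 * so that the bzh_kernel_len store above is visible to the
                 * user process no later than the new generation number.
                 */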
                atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
        }
}

/*
 * Notification from the BPF framework that a buffer has moved into the held
 * slot on a descriptor. Zero-copy BPF will update the shared page to let
 * the user process know and flag the buffer as assigned if it hasn't already
 * been marked assigned due to filling while it was in the store position.
 *
 * Note: identical logic as in bpf_zerocopy_buffull(), except that we operate
 * on bd_hbuf and bd_hlen.
 */
void
bpf_zerocopy_bufheld(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_bufheld: not in zbuf mode"));

        zb = (struct zbuf *)d->bd_hbuf;
        KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));

        if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
                zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
                zb->zb_header->bzh_kernel_len = d->bd_hlen;
                atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
        }
}

/*
 * Notification from the BPF framework that the free buffer has been rotated
 * out of the held position to the free position. This happens when the
 * user acknowledges the held buffer.
 */
void
bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_reclaim_buf: not in zbuf mode"));
        KASSERT(d->bd_fbuf != NULL,
            ("bpf_zerocopy_buf_reclaimed: NULL free buf"));

        zb = (struct zbuf *)d->bd_fbuf;
        zb->zb_flags &= ~ZBUF_FLAG_ASSIGNED;
}

/*
 * Query from the BPF framework regarding whether the buffer currently in the
 * held position can be moved to the free position, which can be indicated by
 * the user process making its generation number equal to the kernel
 * generation number.
 */
int
bpf_zerocopy_canfreebuf(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_canfreebuf: not in zbuf mode"));

        zb = (struct zbuf *)d->bd_hbuf;
        if (zb == NULL)
                return (0);
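
        /*
         * The user process signals that it is finished with the held buffer
         * by advancing bzh_user_gen to match bzh_kernel_gen; the acquire
         * load below pairs with the release increment in
         * bpf_zerocopy_buffull() and bpf_zerocopy_bufheld().
         */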
        if (zb->zb_header->bzh_kernel_gen ==
            atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
                return (1);
        return (0);
}

/*
 * Query from the BPF framework as to whether or not the buffer currently in
 * the store position can actually be written to. This may return false if
 * the store buffer is assigned to userspace before the hold buffer is
 * acknowledged.
 */
int
bpf_zerocopy_canwritebuf(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_canwritebuf: not in zbuf mode"));

        zb = (struct zbuf *)d->bd_sbuf;
        KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));

        if (zb->zb_flags & ZBUF_FLAG_ASSIGNED)
                return (0);
        return (1);
}

/*
 * Free zero copy buffers at request of descriptor.
 */
void
bpf_zerocopy_free(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_free: not in zbuf mode"));

        zb = (struct zbuf *)d->bd_sbuf;
        if (zb != NULL)
                zbuf_free(zb);
        zb = (struct zbuf *)d->bd_hbuf;
        if (zb != NULL)
                zbuf_free(zb);
        zb = (struct zbuf *)d->bd_fbuf;
        if (zb != NULL)
                zbuf_free(zb);
}

/*
 * Ioctl to return the maximum buffer size.
 */
int
bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_ioctl_getzmax: not in zbuf mode"));

        *i = BPF_MAX_PAGES * PAGE_SIZE;
        return (0);
}

/*
 * Ioctl to force rotation of the two buffers, if there's any data available.
 * This can be used by user space to implement timeouts when waiting for a
 * buffer to fill.
 */
int
bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
        struct zbuf *bzh;

        bzero(bz, sizeof(*bz));
        BPFD_LOCK(d);
        if (d->bd_hbuf == NULL && d->bd_slen != 0) {
                ROTATE_BUFFERS(d);
                bzh = (struct zbuf *)d->bd_hbuf;
                bz->bz_bufa = (void *)bzh->zb_uaddr;
                bz->bz_buflen = d->bd_hlen;
        }
        BPFD_UNLOCK(d);
        return (0);
}

/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */
int
bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
        struct zbuf *zba, *zbb;
        int error;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_ioctl_setzbuf: not in zbuf mode"));

        /*
         * Must set both buffers. Cannot clear them.
         */
        if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
                return (EINVAL);

        /*
         * Buffers must have a size greater than 0. Alignment and other size
         * validity checking is done in zbuf_setup().
         */
        if (bz->bz_buflen == 0)
                return (EINVAL);

        /*
         * Allocate new buffers.
         */
        error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
            &zba);
        if (error)
                return (error);
        error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
            &zbb);
        if (error) {
                zbuf_free(zba);
                return (error);
        }

        /*
         * We only allow buffers to be installed once, so atomically check
         * that no buffers are currently installed and install new buffers.
         */
        BPFD_LOCK(d);
        if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
            d->bd_bif != NULL) {
                BPFD_UNLOCK(d);
                zbuf_free(zba);
                zbuf_free(zbb);
                return (EINVAL);
        }

        /*
         * Point BPF descriptor at buffers; initialize sbuf as zba so that
         * it is always filled first in the sequence, per bpf(4).
         */
        d->bd_fbuf = (caddr_t)zbb;
        d->bd_sbuf = (caddr_t)zba;
        d->bd_slen = 0;
        d->bd_hlen = 0;

        /*
         * We expose only the space left in the buffer after the size of the
         * shared management region.
         */
        d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
        BPFD_UNLOCK(d);
        return (0);
}