if_tun.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123
  1. /* $OpenBSD: if_tun.c,v 1.151 2015/07/20 22:54:29 mpi Exp $ */
  2. /* $NetBSD: if_tun.c,v 1.24 1996/05/07 02:40:48 thorpej Exp $ */
  3. /*
  4. * Copyright (c) 1988, Julian Onions <Julian.Onions@nexor.co.uk>
  5. * Nottingham University 1987.
  6. * All rights reserved.
  7. *
  8. * Redistribution and use in source and binary forms, with or without
  9. * modification, are permitted provided that the following conditions
  10. * are met:
  11. * 1. Redistributions of source code must retain the above copyright
  12. * notice, this list of conditions and the following disclaimer.
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. *
  17. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27. */
  28. /*
  29. * This driver takes packets off the IP i/f and hands them up to a
  30. * user process to have its wicked way with. This driver has its
  31. * roots in a similar driver written by Phil Cockcroft (formerly) at
  32. * UCL. This driver is based much more on read/write/select mode of
  33. * operation though.
  34. */
  35. /* #define TUN_DEBUG 9 */
  36. #include <sys/param.h>
  37. #include <sys/kernel.h>
  38. #include <sys/proc.h>
  39. #include <sys/systm.h>
  40. #include <sys/mbuf.h>
  41. #include <sys/protosw.h>
  42. #include <sys/socket.h>
  43. #include <sys/ioctl.h>
  44. #include <sys/errno.h>
  45. #include <sys/syslog.h>
  46. #include <sys/selinfo.h>
  47. #include <sys/file.h>
  48. #include <sys/time.h>
  49. #include <sys/device.h>
  50. #include <sys/vnode.h>
  51. #include <sys/signalvar.h>
  52. #include <sys/poll.h>
  53. #include <sys/conf.h>
  54. #include <net/if.h>
  55. #include <net/if_types.h>
  56. #include <net/netisr.h>
  57. #include <netinet/in.h>
  58. #include <netinet/if_ether.h>
  59. #ifdef PIPEX
  60. #include <net/pipex.h>
  61. #endif
  62. #include "bpfilter.h"
  63. #if NBPFILTER > 0
  64. #include <net/bpf.h>
  65. #endif
  66. #include <net/if_tun.h>
/*
 * Per-instance softc for a tun(4) interface.  Embeds the arpcom (and
 * therefore the ifnet) so the device can operate either as a layer 3
 * point-to-point tunnel or, with TUN_LAYER2 set, as a fake Ethernet.
 */
struct tun_softc {
	struct arpcom	arpcom;		/* ethernet common data */
	struct selinfo	tun_rsel;	/* read select */
	struct selinfo	tun_wsel;	/* write select (not used) */
	LIST_ENTRY(tun_softc) tun_list;	/* all tunnel interfaces */
	int		tun_unit;	/* minor number / clone unit */
	uid_t		tun_siguid;	/* uid for process that set tun_pgid */
	uid_t		tun_sigeuid;	/* euid for process that set tun_pgid */
	pid_t		tun_pgid;	/* the process group - if any */
	u_short		tun_flags;	/* misc flags */
#define tun_if	arpcom.ac_if		/* shorthand for the embedded ifnet */
#ifdef PIPEX
	struct pipex_iface_context pipex_iface; /* pipex context */
#endif
};
/*
 * Debug printf wrapper: compiles away entirely unless TUN_DEBUG is
 * defined (see the commented-out #define at the top of the file).
 */
#ifdef TUN_DEBUG
int	tundebug = TUN_DEBUG;
#define TUNDEBUG(a)	(tundebug? printf a : 0)
#else
#define TUNDEBUG(a)	/* (tundebug? printf a : 0) */
#endif

/* Only these IFF flags are changeable by TUNSIFINFO */
#define TUN_IFF_FLAGS (IFF_UP|IFF_POINTOPOINT|IFF_MULTICAST|IFF_BROADCAST)
/* autoconf attach hook */
void	tunattach(int);

/* cdevsw entry points */
int	tunopen(dev_t, int, int, struct proc *);
int	tunclose(dev_t, int, int, struct proc *);
int	tunioctl(dev_t, u_long, caddr_t, int, struct proc *);
int	tunread(dev_t, struct uio *, int);
int	tunwrite(dev_t, struct uio *, int);
int	tunpoll(dev_t, int, struct proc *);
int	tunkqfilter(dev_t, struct knote *);

/* ifnet handlers */
int	tun_ioctl(struct ifnet *, u_long, caddr_t);
int	tun_output(struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *);
void	tunstart(struct ifnet *);

/* cloning and lifecycle */
int	tun_clone_create(struct if_clone *, int);
int	tun_create(struct if_clone *, int, int);
int	tun_clone_destroy(struct ifnet *);
struct	tun_softc *tun_lookup(int);
void	tun_wakeup(struct tun_softc *);
int	tun_switch(struct tun_softc *, int);
void	tun_ifattach(struct ifnet *, int);
void	tun_ifdetach(struct ifnet *);
int	tuninit(struct tun_softc *);
void	tun_link_state(struct tun_softc *);

/* kqueue filters */
int	filt_tunread(struct knote *, long);
int	filt_tunwrite(struct knote *, long);
void	filt_tunrdetach(struct knote *);
void	filt_tunwdetach(struct knote *);

/* first member is f_isfd = 1: filters attach via a file descriptor */
struct filterops tunread_filtops =
	{ 1, NULL, filt_tunrdetach, filt_tunread};
struct filterops tunwrite_filtops =
	{ 1, NULL, filt_tunwdetach, filt_tunwrite};

/* all existing tun instances, guarded by splnet where modified */
LIST_HEAD(, tun_softc) tun_softc_list;

struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);
/*
 * Boot-time attach: initialize the instance list and register the
 * "tun" cloner (and PIPEX, when configured).  The argument is the
 * pseudo-device count from config(8) and is unused here.
 */
void
tunattach(int n)
{
	LIST_INIT(&tun_softc_list);
	if_clone_attach(&tun_cloner);
#ifdef PIPEX
	pipex_init();
#endif
}
  132. int
  133. tun_clone_create(struct if_clone *ifc, int unit)
  134. {
  135. return (tun_create(ifc, unit, 0));
  136. }
/*
 * Attach the ifnet for a tun instance, either as a layer 3 tunnel
 * (default) or as a fake Ethernet when TUN_LAYER2 is in "flags".
 * Also links the softc onto the global list and sets up PIPEX.
 */
void
tun_ifattach(struct ifnet *ifp, int flags)
{
	struct tun_softc *tp = ifp->if_softc;
	int s;

	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
	ifp->if_start = tunstart;
	ifp->if_hardmtu = TUNMRU;
	ifp->if_link_state = LINK_STATE_DOWN;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	if ((flags & TUN_LAYER2) == 0) {
		/* layer 3: point-to-point, 4-byte AF header (DLT_LOOP) */
		tp->tun_flags &= ~TUN_LAYER2;
		ifp->if_mtu = ETHERMTU;
		ifp->if_flags = IFF_POINTOPOINT;
		ifp->if_type = IFT_TUNNEL;
		ifp->if_hdrlen = sizeof(u_int32_t);
		if_attach(ifp);
		if_alloc_sadl(ifp);
#if NBPFILTER > 0
		bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(u_int32_t));
#endif
	} else {
		/* layer 2: ether_ifattach() supplies MTU, sadl and bpf */
		tp->tun_flags |= TUN_LAYER2;
		ifp->if_flags =
		    (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST|IFF_LINK0);
		ifp->if_capabilities = IFCAP_VLAN_MTU;
		if_attach(ifp);
		ether_ifattach(ifp);
	}

	/* make the instance findable by tun_lookup() */
	s = splnet();
	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
	splx(s);
#ifdef PIPEX
	pipex_iface_init(&tp->pipex_iface, ifp);
#endif
}
/*
 * Undo tun_ifattach(): wake any sleeping reader, invalidate kqueue
 * notes, unlink from the global list and detach the ifnet.  Order
 * matters: readers/filters must be kicked before the ifnet goes away.
 */
void
tun_ifdetach(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;
	int s;

#ifdef PIPEX
	pipex_iface_fini(&tp->pipex_iface);
#endif
	/* unblock tunread()/poll sleepers before tearing down */
	tun_wakeup(tp);

	s = splhigh();
	klist_invalidate(&tp->tun_rsel.si_note);
	klist_invalidate(&tp->tun_wsel.si_note);
	splx(s);

	s = splnet();
	LIST_REMOVE(tp, tun_list);
	splx(s);

	if (tp->tun_flags & TUN_LAYER2)
		ether_ifdetach(ifp);

	if_detach(ifp);
}
  195. int
  196. tun_create(struct if_clone *ifc, int unit, int flags)
  197. {
  198. struct tun_softc *tp;
  199. struct ifnet *ifp;
  200. tp = malloc(sizeof(*tp), M_DEVBUF, M_NOWAIT|M_ZERO);
  201. if (tp == NULL)
  202. return (ENOMEM);
  203. tp->tun_unit = unit;
  204. tp->tun_flags = TUN_INITED|TUN_STAYUP;
  205. ifp = &tp->tun_if;
  206. snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d", ifc->ifc_name,
  207. unit);
  208. ether_fakeaddr(ifp);
  209. ifp->if_softc = tp;
  210. tun_ifattach(ifp, flags);
  211. return (0);
  212. }
  213. int
  214. tun_clone_destroy(struct ifnet *ifp)
  215. {
  216. struct tun_softc *tp = ifp->if_softc;
  217. tun_ifdetach(ifp);
  218. free(tp, M_DEVBUF, 0);
  219. return (0);
  220. }
  221. struct tun_softc *
  222. tun_lookup(int unit)
  223. {
  224. struct tun_softc *tp;
  225. LIST_FOREACH(tp, &tun_softc_list, tun_list)
  226. if (tp->tun_unit == unit)
  227. return (tp);
  228. return (NULL);
  229. }
  230. int
  231. tun_switch(struct tun_softc *tp, int flags)
  232. {
  233. struct ifnet *ifp = &tp->tun_if;
  234. int unit, open, s;
  235. struct ifg_list *ifgl;
  236. u_int ifgr_len;
  237. char *ifgrpnames, *p;
  238. if ((tp->tun_flags & TUN_LAYER2) == (flags & TUN_LAYER2))
  239. return (0);
  240. /* tp will be removed so store unit number */
  241. unit = tp->tun_unit;
  242. open = tp->tun_flags & (TUN_OPEN|TUN_NBIO|TUN_ASYNC);
  243. TUNDEBUG(("%s: switching to layer %d\n", ifp->if_xname,
  244. flags & TUN_LAYER2 ? 2 : 3));
  245. /* remember joined groups */
  246. ifgr_len = 0;
  247. ifgrpnames = NULL;
  248. TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
  249. ifgr_len += IFNAMSIZ;
  250. if (ifgr_len)
  251. ifgrpnames = malloc(ifgr_len + 1, M_TEMP, M_NOWAIT|M_ZERO);
  252. if (ifgrpnames) {
  253. p = ifgrpnames;
  254. TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
  255. strlcpy(p, ifgl->ifgl_group->ifg_group, IFNAMSIZ);
  256. p += IFNAMSIZ;
  257. }
  258. }
  259. if (ifp->if_flags & IFF_UP)
  260. if_down(ifp);
  261. tun_ifdetach(ifp);
  262. tun_ifattach(ifp, flags);
  263. /* rejoin groups */
  264. for (p = ifgrpnames; p && *p; p += IFNAMSIZ)
  265. if_addgroup(ifp, p);
  266. if (open) {
  267. /* already opened before ifconfig tunX link0 */
  268. s = splnet();
  269. tp->tun_flags |= open;
  270. ifp->if_flags |= IFF_RUNNING;
  271. tun_link_state(tp);
  272. splx(s);
  273. TUNDEBUG(("%s: already open\n", tp->tun_if.if_xname));
  274. }
  275. if (ifgrpnames)
  276. free(ifgrpnames, M_TEMP, 0);
  277. return (0);
  278. }
  279. /*
  280. * tunnel open - must be superuser & the device must be
  281. * configured in
  282. */
  283. int
  284. tunopen(dev_t dev, int flag, int mode, struct proc *p)
  285. {
  286. struct tun_softc *tp;
  287. struct ifnet *ifp;
  288. int error, s;
  289. if ((tp = tun_lookup(minor(dev))) == NULL) { /* create on demand */
  290. char xname[IFNAMSIZ];
  291. snprintf(xname, sizeof(xname), "%s%d", "tun", minor(dev));
  292. if ((error = if_clone_create(xname)) != 0)
  293. return (error);
  294. if ((tp = tun_lookup(minor(dev))) == NULL)
  295. return (ENXIO);
  296. tp->tun_flags &= ~TUN_STAYUP;
  297. }
  298. if (tp->tun_flags & TUN_OPEN)
  299. return (EBUSY);
  300. ifp = &tp->tun_if;
  301. tp->tun_flags |= TUN_OPEN;
  302. /* automatically mark the interface running on open */
  303. s = splnet();
  304. ifp->if_flags |= IFF_RUNNING;
  305. tun_link_state(tp);
  306. splx(s);
  307. TUNDEBUG(("%s: open\n", ifp->if_xname));
  308. return (0);
  309. }
  310. /*
  311. * tunclose - close the device; if closing the real device, flush pending
  312. * output and unless STAYUP bring down and destroy the interface.
  313. */
  314. int
  315. tunclose(dev_t dev, int flag, int mode, struct proc *p)
  316. {
  317. int s;
  318. struct tun_softc *tp;
  319. struct ifnet *ifp;
  320. if ((tp = tun_lookup(minor(dev))) == NULL)
  321. return (ENXIO);
  322. ifp = &tp->tun_if;
  323. tp->tun_flags &= ~(TUN_OPEN|TUN_NBIO|TUN_ASYNC);
  324. /*
  325. * junk all pending output
  326. */
  327. s = splnet();
  328. ifp->if_flags &= ~IFF_RUNNING;
  329. tun_link_state(tp);
  330. IFQ_PURGE(&ifp->if_snd);
  331. splx(s);
  332. TUNDEBUG(("%s: closed\n", ifp->if_xname));
  333. if (!(tp->tun_flags & TUN_STAYUP))
  334. return (if_clone_destroy(ifp->if_xname));
  335. else {
  336. tp->tun_pgid = 0;
  337. selwakeup(&tp->tun_rsel);
  338. }
  339. return (0);
  340. }
/*
 * Bring the interface up and recompute the address-derived flags
 * (TUN_IASET/TUN_DSTADDR/TUN_BRDADDR) by walking the address list.
 * Called from tun_ioctl() on SIOCSIFADDR/SIOCSIFDSTADDR.
 */
int
tuninit(struct tun_softc *tp)
{
	struct ifnet *ifp = &tp->tun_if;
	struct ifaddr *ifa;

	TUNDEBUG(("%s: tuninit\n", ifp->if_xname));

	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;	/* we are never active */

	/* start from a clean slate; set flags per configured address */
	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR|TUN_BRDADDR);
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = satosin(ifa->ifa_addr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_IASET;

			if (ifp->if_flags & IFF_POINTOPOINT) {
				sin = satosin(ifa->ifa_dstaddr);
				if (sin && sin->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
			} else
				tp->tun_flags &= ~TUN_DSTADDR;

			if (ifp->if_flags & IFF_BROADCAST) {
				sin = satosin(ifa->ifa_broadaddr);
				if (sin && sin->sin_addr.s_addr)
					tp->tun_flags |= TUN_BRDADDR;
			} else
				tp->tun_flags &= ~TUN_BRDADDR;
		}
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *sin;

			sin = (struct sockaddr_in6 *)ifa->ifa_addr;
			if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
				tp->tun_flags |= TUN_IASET;

			if (ifp->if_flags & IFF_POINTOPOINT) {
				sin = (struct sockaddr_in6 *)ifa->ifa_dstaddr;
				if (sin &&
				    !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
					tp->tun_flags |= TUN_DSTADDR;
			} else
				tp->tun_flags &= ~TUN_DSTADDR;
		}
#endif /* INET6 */
	}

	return (0);
}
/*
 * Process an ioctl request arriving via the ifnet (ifconfig path),
 * as opposed to tunioctl() which handles the character device.
 * Runs at splnet for the duration of the dispatch.
 */
int
tun_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int error = 0, s;

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
		tuninit(tp);
		TUNDEBUG(("%s: address set\n", ifp->if_xname));
		if (tp->tun_flags & TUN_LAYER2) {
			switch (ifa->ifa_addr->sa_family) {
			case AF_INET:
				arp_ifinit(&tp->arpcom, ifa);
				break;
			default:
				break;
			}
		} else {
			/* layer 3 is point-to-point; use p2p route hooks */
			ifa->ifa_rtrequest = p2p_rtrequest;
		}
		break;
	case SIOCSIFDSTADDR:
		tuninit(tp);
		TUNDEBUG(("%s: destination address set\n", ifp->if_xname));
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > TUNMRU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	case SIOCSIFFLAGS:
		/* IFF_LINK0 toggles layer 2 mode */
		error = tun_switch(tp,
		    ifp->if_flags & IFF_LINK0 ? TUN_LAYER2 : 0);
		break;
	default:
		if (tp->tun_flags & TUN_LAYER2)
			error = ether_ioctl(ifp, &tp->arpcom, cmd, data);
		else
			error = ENOTTY;
	}
	splx(s);
	return (error);
}
/*
 * tun_output - queue packets from higher level ready to put out.
 * Layer 2 mode defers to ether_output(); layer 3 prepends a 4-byte
 * address-family header (DLT_LOOP framing) and enqueues for the
 * reading process, waking it via tun_wakeup().
 */
int
tun_output(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int s, error;
	u_int32_t *af;

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
		m_freem(m0);
		return (EHOSTDOWN);
	}

	TUNDEBUG(("%s: tun_output\n", ifp->if_xname));

	/* no reader attached (TUN_READY = open + address set) */
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(("%s: not ready %#x\n", ifp->if_xname,
		    tp->tun_flags));
		m_freem(m0);
		return (EHOSTDOWN);
	}

	if (tp->tun_flags & TUN_LAYER2)
		return (ether_output(ifp, m0, dst, rt));

	/* prepend the address family in network byte order */
	M_PREPEND(m0, sizeof(*af), M_DONTWAIT);
	if (m0 == NULL)
		return (ENOBUFS);
	af = mtod(m0, u_int32_t *);
	*af = htonl(dst->sa_family);

	s = splnet();
#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
#ifdef PIPEX
	/* PIPEX may consume the packet entirely */
	if ((m0 = pipex_output(m0, dst->sa_family, sizeof(u_int32_t),
	    &tp->pipex_iface)) == NULL) {
		splx(s);
		return (0);
	}
#endif

	error = if_enqueue(ifp, m0);
	splx(s);

	if (error) {
		ifp->if_collisions++;
		return (error);
	}
	ifp->if_opackets++;

	tun_wakeup(tp);
	return (0);
}
  490. void
  491. tun_wakeup(struct tun_softc *tp)
  492. {
  493. if (tp->tun_flags & TUN_RWAIT) {
  494. tp->tun_flags &= ~TUN_RWAIT;
  495. wakeup((caddr_t)tp);
  496. }
  497. if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
  498. csignal(tp->tun_pgid, SIGIO,
  499. tp->tun_siguid, tp->tun_sigeuid);
  500. selwakeup(&tp->tun_rsel);
  501. }
/*
 * the cdevsw interface is now pretty minimal.
 * Character-device ioctls: tuning (TUNSIFINFO/TUNGIFINFO/TUNSIFMODE),
 * non-blocking/async toggles, pending-read size, SIGIO process group,
 * and (layer 2 only) the Ethernet address.  Everything runs at splnet;
 * every early return restores spl first.
 */
int
tunioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	int s;
	struct tun_softc *tp;
	struct tuninfo *tunp;
	struct mbuf *m;

	if ((tp = tun_lookup(minor(dev))) == NULL)
		return (ENXIO);

	s = splnet();
	switch (cmd) {
	case TUNSIFINFO:
		tunp = (struct tuninfo *)data;
		if (tunp->mtu < ETHERMIN || tunp->mtu > TUNMRU) {
			splx(s);
			return (EINVAL);
		}
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		/* only TUN_IFF_FLAGS bits may be changed this way */
		tp->tun_if.if_flags =
		    (tunp->flags & TUN_IFF_FLAGS) |
		    (tp->tun_if.if_flags & ~TUN_IFF_FLAGS);
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->flags = tp->tun_if.if_flags;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
#ifdef TUN_DEBUG
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;
	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;
#endif
	case TUNSIFMODE:
		/* exactly one of PTP/BROADCAST must be requested */
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &= ~TUN_IFF_FLAGS;
			tp->tun_if.if_flags |= *(int *)data & TUN_IFF_FLAGS;
			break;
		default:
			splx(s);
			return (EINVAL);
		}
		break;
	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;
	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;
	case FIONREAD:
		/* report the size of the next queued packet, if any */
		IFQ_POLL(&tp->tun_if.if_snd, m);
		if (m != NULL)
			*(int *)data = m->m_pkthdr.len;
		else
			*(int *)data = 0;
		break;
	case TIOCSPGRP:
		/* remember who set the pgid for later SIGIO permission */
		tp->tun_pgid = *(int *)data;
		tp->tun_siguid = p->p_ucred->cr_ruid;
		tp->tun_sigeuid = p->p_ucred->cr_uid;
		break;
	case TIOCGPGRP:
		*(int *)data = tp->tun_pgid;
		break;
	case SIOCGIFADDR:
		if (!(tp->tun_flags & TUN_LAYER2)) {
			splx(s);
			return (EINVAL);
		}
		bcopy(tp->arpcom.ac_enaddr, data,
		    sizeof(tp->arpcom.ac_enaddr));
		break;
	case SIOCSIFADDR:
		if (!(tp->tun_flags & TUN_LAYER2)) {
			splx(s);
			return (EINVAL);
		}
		bcopy(data, tp->arpcom.ac_enaddr,
		    sizeof(tp->arpcom.ac_enaddr));
		break;
	default:
#ifdef PIPEX
		{
			int ret;
			/* unknown commands are offered to PIPEX */
			ret = pipex_ioctl(&tp->pipex_iface, cmd, data);
			splx(s);
			return (ret);
		}
#else
		splx(s);
		return (ENOTTY);
#endif
	}
	splx(s);
	return (0);
}
/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 *
 * Blocks (unless TUN_NBIO) until a packet is queued.  After every
 * tsleep() the interface is re-looked-up by index, since the instance
 * may have been destroyed while we slept.
 */
int
tunread(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *m, *m0;
	int error = 0, len, s;
	unsigned int ifindex;

	if ((tp = tun_lookup(minor(dev))) == NULL)
		return (ENXIO);

	ifp = &tp->tun_if;
	ifindex = ifp->if_index;
	TUNDEBUG(("%s: read\n", ifp->if_xname));
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(("%s: not ready %#x\n", ifp->if_xname, tp->tun_flags));
		return (EHOSTDOWN);
	}

	tp->tun_flags &= ~TUN_RWAIT;

	s = splnet();
	do {
		/* wait until the interface is up and has an address */
		while ((tp->tun_flags & TUN_READY) != TUN_READY) {
			if ((error = tsleep((caddr_t)tp,
			    (PZERO + 1)|PCATCH, "tunread", 0)) != 0) {
				splx(s);
				return (error);
			}
			/* revalidate: the instance may be gone by now */
			if ((ifp = if_get(ifindex)) == NULL) {
				splx(s);
				return (ENXIO);
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL) {
			if (tp->tun_flags & TUN_NBIO && ioflag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			/* sleep until tun_wakeup() signals a packet */
			tp->tun_flags |= TUN_RWAIT;
			if ((error = tsleep((caddr_t)tp,
			    (PZERO + 1)|PCATCH, "tunread", 0)) != 0) {
				splx(s);
				return (error);
			}
			/* revalidate after sleeping, as above */
			if ((ifp = if_get(ifindex)) == NULL) {
				splx(s);
				return (ENXIO);
			}
		}
	} while (m0 == NULL);
	splx(s);

	/* copy the chain out, freeing each mbuf as it is consumed */
	while (m0 != NULL && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomovei(mtod(m0, caddr_t), len, uio);
		m = m_free(m0);
		m0 = m;
	}

	/* caller's buffer was too small: the tail of the packet is lost */
	if (m0 != NULL) {
		TUNDEBUG(("Dropping mbuf\n"));
		m_freem(m0);
	}
	if (error)
		ifp->if_oerrors++;

	return (error);
}
/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 *
 * Builds an mbuf chain from the user buffer, then injects it: layer 2
 * frames go through if_input(); layer 3 packets have their 4-byte
 * address-family header stripped and are queued on the matching
 * protocol input queue.
 */
int
tunwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct niqueue *ifq;
	u_int32_t *th;
	struct mbuf *top, **mp, *m;
	int error=0, s, tlen, mlen;

	if ((tp = tun_lookup(minor(dev))) == NULL)
		return (ENXIO);

	ifp = &tp->tun_if;
	TUNDEBUG(("%s: tunwrite\n", ifp->if_xname));

	/* one write = one packet; reject empty or over-MTU writes */
	if (uio->uio_resid == 0 || uio->uio_resid > ifp->if_mtu +
	    (tp->tun_flags & TUN_LAYER2 ? ETHER_HDR_LEN : sizeof(*th))) {
		TUNDEBUG(("%s: len=%d!\n", ifp->if_xname, uio->uio_resid));
		return (EMSGSIZE);
	}
	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	mlen = MHLEN;
	if (uio->uio_resid >= MINCLSIZE) {
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_free(m);
			return (ENOBUFS);
		}
		mlen = MCLBYTES;
	}

	top = NULL;
	mp = &top;
	if (tp->tun_flags & TUN_LAYER2) {
		/*
		 * Pad so that IP header is correctly aligned
		 * this is necessary for all strict aligned architectures.
		 */
		mlen -= ETHER_ALIGN;
		m->m_data += ETHER_ALIGN;
	}

	/* fill mbufs from the uio, growing the chain as needed */
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomovei(mtod (m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
			if (uio->uio_resid >= MINCLSIZE) {
				MCLGET(m, M_DONTWAIT);
				if (!(m->m_flags & M_EXT)) {
					error = ENOBUFS;
					m_free(m);
					break;
				}
				mlen = MCLBYTES;
			}
		}
	}
	if (error) {
		m_freem(top);
		ifp->if_ierrors++;
		return (error);
	}

	top->m_pkthdr.len = tlen;

	if (tp->tun_flags & TUN_LAYER2) {
		/* layer 2: hand the frame to the stack as if received */
		struct mbuf_list ml = MBUF_LIST_INITIALIZER();

		ml_enqueue(&ml, top);
		if_input(ifp, &ml);
		return (0);
	}

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		s = splnet();
		bpf_mtap(ifp->if_bpf, top, BPF_DIRECTION_IN);
		splx(s);
	}
#endif

	th = mtod(top, u_int32_t *);
	/* strip the tunnel header */
	top->m_data += sizeof(*th);
	top->m_len  -= sizeof(*th);
	top->m_pkthdr.len -= sizeof(*th);
	top->m_pkthdr.ph_rtableid = ifp->if_rdomain;
	top->m_pkthdr.ph_ifidx = ifp->if_index;

	/* pick the protocol input queue from the AF header */
	switch (ntohl(*th)) {
	case AF_INET:
		ifq = &ipintrq;
		break;
#ifdef INET6
	case AF_INET6:
		ifq = &ip6intrq;
		break;
#endif
	default:
		m_freem(top);
		return (EAFNOSUPPORT);
	}

	/* niq_enqueue() frees the mbuf itself on failure */
	if (niq_enqueue(ifq, top) != 0) {
		ifp->if_collisions++;
		return (ENOBUFS);
	}

	ifp->if_ipackets++;
	ifp->if_ibytes += top->m_pkthdr.len;

	return (error);
}
  799. /*
  800. * tunpoll - the poll interface, this is only useful on reads
  801. * really. The write detect always returns true, write never blocks
  802. * anyway, it either accepts the packet or drops it.
  803. */
  804. int
  805. tunpoll(dev_t dev, int events, struct proc *p)
  806. {
  807. int revents, s;
  808. struct tun_softc *tp;
  809. struct ifnet *ifp;
  810. struct mbuf *m;
  811. if ((tp = tun_lookup(minor(dev))) == NULL)
  812. return (POLLERR);
  813. ifp = &tp->tun_if;
  814. revents = 0;
  815. s = splnet();
  816. TUNDEBUG(("%s: tunpoll\n", ifp->if_xname));
  817. if (events & (POLLIN | POLLRDNORM)) {
  818. IFQ_POLL(&ifp->if_snd, m);
  819. if (m != NULL) {
  820. TUNDEBUG(("%s: tunselect q=%d\n", ifp->if_xname,
  821. IFQ_LEN(ifp->if_snd)));
  822. revents |= events & (POLLIN | POLLRDNORM);
  823. } else {
  824. TUNDEBUG(("%s: tunpoll waiting\n", ifp->if_xname));
  825. selrecord(p, &tp->tun_rsel);
  826. }
  827. }
  828. if (events & (POLLOUT | POLLWRNORM))
  829. revents |= events & (POLLOUT | POLLWRNORM);
  830. splx(s);
  831. return (revents);
  832. }
  833. /*
  834. * kqueue(2) support.
  835. *
  836. * The tun driver uses an array of tun_softc's based on the minor number
  837. * of the device. kn->kn_hook gets set to the specific tun_softc.
  838. *
  839. * filt_tunread() sets kn->kn_data to the iface qsize
  840. * filt_tunwrite() sets kn->kn_data to the MTU size
  841. */
  842. int
  843. tunkqfilter(dev_t dev, struct knote *kn)
  844. {
  845. int s;
  846. struct klist *klist;
  847. struct tun_softc *tp;
  848. struct ifnet *ifp;
  849. if ((tp = tun_lookup(minor(dev))) == NULL)
  850. return (ENXIO);
  851. ifp = &tp->tun_if;
  852. s = splnet();
  853. TUNDEBUG(("%s: tunkqfilter\n", ifp->if_xname));
  854. splx(s);
  855. switch (kn->kn_filter) {
  856. case EVFILT_READ:
  857. klist = &tp->tun_rsel.si_note;
  858. kn->kn_fop = &tunread_filtops;
  859. break;
  860. case EVFILT_WRITE:
  861. klist = &tp->tun_wsel.si_note;
  862. kn->kn_fop = &tunwrite_filtops;
  863. break;
  864. default:
  865. return (EINVAL);
  866. }
  867. kn->kn_hook = (caddr_t)tp;
  868. s = splhigh();
  869. SLIST_INSERT_HEAD(klist, kn, kn_selnext);
  870. splx(s);
  871. return (0);
  872. }
  873. void
  874. filt_tunrdetach(struct knote *kn)
  875. {
  876. int s;
  877. struct tun_softc *tp;
  878. tp = (struct tun_softc *)kn->kn_hook;
  879. s = splhigh();
  880. if (!(kn->kn_status & KN_DETACHED))
  881. SLIST_REMOVE(&tp->tun_rsel.si_note, kn, knote, kn_selnext);
  882. splx(s);
  883. }
  884. int
  885. filt_tunread(struct knote *kn, long hint)
  886. {
  887. int s;
  888. struct tun_softc *tp;
  889. struct ifnet *ifp;
  890. struct mbuf *m;
  891. if (kn->kn_status & KN_DETACHED) {
  892. kn->kn_data = 0;
  893. return (1);
  894. }
  895. tp = (struct tun_softc *)kn->kn_hook;
  896. ifp = &tp->tun_if;
  897. s = splnet();
  898. IFQ_POLL(&ifp->if_snd, m);
  899. if (m != NULL) {
  900. splx(s);
  901. kn->kn_data = IFQ_LEN(&ifp->if_snd);
  902. TUNDEBUG(("%s: tunkqread q=%d\n", ifp->if_xname,
  903. IFQ_LEN(&ifp->if_snd)));
  904. return (1);
  905. }
  906. splx(s);
  907. TUNDEBUG(("%s: tunkqread waiting\n", ifp->if_xname));
  908. return (0);
  909. }
  910. void
  911. filt_tunwdetach(struct knote *kn)
  912. {
  913. int s;
  914. struct tun_softc *tp;
  915. tp = (struct tun_softc *)kn->kn_hook;
  916. s = splhigh();
  917. if (!(kn->kn_status & KN_DETACHED))
  918. SLIST_REMOVE(&tp->tun_wsel.si_note, kn, knote, kn_selnext);
  919. splx(s);
  920. }
  921. int
  922. filt_tunwrite(struct knote *kn, long hint)
  923. {
  924. struct tun_softc *tp;
  925. struct ifnet *ifp;
  926. if (kn->kn_status & KN_DETACHED) {
  927. kn->kn_data = 0;
  928. return (1);
  929. }
  930. tp = (struct tun_softc *)kn->kn_hook;
  931. ifp = &tp->tun_if;
  932. kn->kn_data = ifp->if_mtu;
  933. return (1);
  934. }
  935. void
  936. tunstart(struct ifnet *ifp)
  937. {
  938. struct tun_softc *tp = ifp->if_softc;
  939. struct mbuf *m;
  940. splassert(IPL_NET);
  941. IFQ_POLL(&ifp->if_snd, m);
  942. if (m != NULL) {
  943. if (tp->tun_flags & TUN_LAYER2) {
  944. #if NBPFILTER > 0
  945. if (ifp->if_bpf)
  946. bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
  947. #endif
  948. ifp->if_opackets++;
  949. }
  950. tun_wakeup(tp);
  951. }
  952. }
  953. void
  954. tun_link_state(struct tun_softc *tp)
  955. {
  956. struct ifnet *ifp = &tp->tun_if;
  957. int link_state = LINK_STATE_DOWN;
  958. if (tp->tun_flags & TUN_OPEN) {
  959. if (tp->tun_flags & TUN_LAYER2)
  960. link_state = LINK_STATE_FULL_DUPLEX;
  961. else
  962. link_state = LINK_STATE_UP;
  963. }
  964. if (ifp->if_link_state != link_state) {
  965. ifp->if_link_state = link_state;
  966. if_link_state_change(ifp);
  967. }
  968. }