ip_mroute.c

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989 Stephen Deering
 * Copyright (c) 1992, 1993
 * The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)ip_mroute.c 8.2 (Berkeley) 11/15/93
 */

/*
 * IP multicast forwarding procedures
 *
 * Written by David Waitzman, BBN Labs, August 1988.
 * Modified by Steve Deering, Stanford, February 1989.
 * Modified by Mark J. Steiglitz, Stanford, May, 1991
 * Modified by Van Jacobson, LBL, January 1993
 * Modified by Ajit Thyagarajan, PARC, August 1993
 * Modified by Bill Fenner, PARC, April 1995
 * Modified by Ahmed Helmy, SGI, June 1996
 * Modified by George Edmond Eddy (Rusty), ISI, February 1998
 * Modified by Pavlin Radoslavov, USC/ISI, May 1998, August 1999, October 2000
 * Modified by Hitoshi Asaeda, WIDE, August 2000
 * Modified by Pavlin Radoslavov, ICSI, October 2002
 *
 * MROUTING Revision: 3.5
 * and PIM-SMv2 and PIM-DM support, advanced API support,
 * bandwidth metering and signaling
 */

/*
 * TODO: Prefix functions with ipmf_.
 * TODO: Maintain a refcount on if_allmulti() in ifnet or in the protocol
 * domain attachment (if_afdata) so we can track consumers of that service.
 * TODO: Deprecate routing socket path for SIOCGETSGCNT and SIOCGETVIFCNT,
 * move it to socket options.
 * TODO: Cleanup LSRR removal further.
 * TODO: Push RSVP stubs into raw_ip.c.
 * TODO: Use bitstring.h for vif set.
 * TODO: Fix mrt6_ioctl dangling ref when dynamically loaded.
 * TODO: Sync ip6_mroute.c with this file.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_mrouting.h"

#define _PIM_VT 1

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/stddef.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/counter.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/igmp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_encap.h>
#include <netinet/ip_mroute.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/pim.h>
#include <netinet/pim_var.h>
#include <netinet/udp.h>

#include <machine/in_cksum.h>

#ifndef KTR_IPMF
#define KTR_IPMF KTR_INET
#endif

#define VIFI_INVALID ((vifi_t) -1)

VNET_DEFINE_STATIC(uint32_t, last_tv_sec); /* last time we processed this */
#define V_last_tv_sec VNET(last_tv_sec)

static MALLOC_DEFINE(M_MRTABLE, "mroutetbl", "multicast forwarding cache");

/*
 * Locking. We use two locks: one for the virtual interface table and
 * one for the forwarding table. These locks may be nested in which case
 * the VIF lock must always be taken first. Note that each lock is used
 * to cover not only the specific data structure but also related data
 * structures.
 */
static struct mtx mrouter_mtx;
#define MROUTER_LOCK() mtx_lock(&mrouter_mtx)
#define MROUTER_UNLOCK() mtx_unlock(&mrouter_mtx)
#define MROUTER_LOCK_ASSERT() mtx_assert(&mrouter_mtx, MA_OWNED)
#define MROUTER_LOCK_INIT() \
    mtx_init(&mrouter_mtx, "IPv4 multicast forwarding", NULL, MTX_DEF)
#define MROUTER_LOCK_DESTROY() mtx_destroy(&mrouter_mtx)

static int ip_mrouter_cnt;	/* # of vnets with active mrouters */
static int ip_mrouter_unloading; /* Allow no more V_ip_mrouter sockets */

VNET_PCPUSTAT_DEFINE_STATIC(struct mrtstat, mrtstat);
VNET_PCPUSTAT_SYSINIT(mrtstat);
VNET_PCPUSTAT_SYSUNINIT(mrtstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_ip, OID_AUTO, mrtstat, struct mrtstat,
    mrtstat, "IPv4 Multicast Forwarding Statistics (struct mrtstat, "
    "netinet/ip_mroute.h)");

VNET_DEFINE_STATIC(u_long, mfchash);
#define V_mfchash VNET(mfchash)
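/*
 * Hash an (origin, group) address pair into the MFC table. V_mfchash is the
 * hash mask handed back by hashinit_flags() when the table is created, so
 * the result is always a valid bucket index.
 */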
#define MFCHASH(a, g) \
    ((((a).s_addr >> 20) ^ ((a).s_addr >> 10) ^ (a).s_addr ^ \
    ((g).s_addr >> 20) ^ ((g).s_addr >> 10) ^ (g).s_addr) & V_mfchash)
#define MFCHASHSIZE 256

static u_long mfchashsize;			/* Hash size */
VNET_DEFINE_STATIC(u_char *, nexpire);		/* 0..mfchashsize-1 */
#define V_nexpire VNET(nexpire)
VNET_DEFINE_STATIC(LIST_HEAD(mfchashhdr, mfc)*, mfchashtbl);
#define V_mfchashtbl VNET(mfchashtbl)

static struct mtx mfc_mtx;
#define MFC_LOCK() mtx_lock(&mfc_mtx)
#define MFC_UNLOCK() mtx_unlock(&mfc_mtx)
#define MFC_LOCK_ASSERT() mtx_assert(&mfc_mtx, MA_OWNED)
#define MFC_LOCK_INIT() \
    mtx_init(&mfc_mtx, "IPv4 multicast forwarding cache", NULL, MTX_DEF)
#define MFC_LOCK_DESTROY() mtx_destroy(&mfc_mtx)

VNET_DEFINE_STATIC(vifi_t, numvifs);
#define V_numvifs VNET(numvifs)
VNET_DEFINE_STATIC(struct vif *, viftable);
#define V_viftable VNET(viftable)

static struct mtx vif_mtx;
#define VIF_LOCK() mtx_lock(&vif_mtx)
#define VIF_UNLOCK() mtx_unlock(&vif_mtx)
#define VIF_LOCK_ASSERT() mtx_assert(&vif_mtx, MA_OWNED)
#define VIF_LOCK_INIT() \
    mtx_init(&vif_mtx, "IPv4 multicast interfaces", NULL, MTX_DEF)
#define VIF_LOCK_DESTROY() mtx_destroy(&vif_mtx)

static eventhandler_tag if_detach_event_tag = NULL;

VNET_DEFINE_STATIC(struct callout, expire_upcalls_ch);
#define V_expire_upcalls_ch VNET(expire_upcalls_ch)

#define EXPIRE_TIMEOUT (hz / 4)		/* 4x / second */
#define UPCALL_EXPIRE 6			/* number of timeouts */

/*
 * Bandwidth meter variables and constants
 */
static MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters");
/*
 * Pending timeouts are stored in a hash table, the key being the
 * expiration time. Periodically, the entries are analysed and processed.
 */
#define BW_METER_BUCKETS 1024
VNET_DEFINE_STATIC(struct bw_meter **, bw_meter_timers);
#define V_bw_meter_timers VNET(bw_meter_timers)
VNET_DEFINE_STATIC(struct callout, bw_meter_ch);
#define V_bw_meter_ch VNET(bw_meter_ch)
#define BW_METER_PERIOD (hz)		/* periodical handling of bw meters */

/*
 * Pending upcalls are stored in a vector which is flushed when
 * full, or periodically
 */
VNET_DEFINE_STATIC(struct bw_upcall *, bw_upcalls);
#define V_bw_upcalls VNET(bw_upcalls)
VNET_DEFINE_STATIC(u_int, bw_upcalls_n);	/* # of pending upcalls */
#define V_bw_upcalls_n VNET(bw_upcalls_n)
VNET_DEFINE_STATIC(struct callout, bw_upcalls_ch);
#define V_bw_upcalls_ch VNET(bw_upcalls_ch)
#define BW_UPCALLS_PERIOD (hz)		/* periodical flush of bw upcalls */

VNET_PCPUSTAT_DEFINE_STATIC(struct pimstat, pimstat);
VNET_PCPUSTAT_SYSINIT(pimstat);
VNET_PCPUSTAT_SYSUNINIT(pimstat);

SYSCTL_NODE(_net_inet, IPPROTO_PIM, pim, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "PIM");
SYSCTL_VNET_PCPUSTAT(_net_inet_pim, PIMCTL_STATS, stats, struct pimstat,
    pimstat, "PIM Statistics (struct pimstat, netinet/pim_var.h)");

static u_long pim_squelch_wholepkt = 0;
SYSCTL_ULONG(_net_inet_pim, OID_AUTO, squelch_wholepkt, CTLFLAG_RW,
    &pim_squelch_wholepkt, 0,
    "Disable IGMP_WHOLEPKT notifications if rendezvous point is unspecified");

static const struct encaptab *pim_encap_cookie;
static int pim_encapcheck(const struct mbuf *, int, int, void *);
static int pim_input(struct mbuf *, int, int, void *);

static const struct encap_config ipv4_encap_cfg = {
    .proto = IPPROTO_PIM,
    .min_length = sizeof(struct ip) + PIM_MINLEN,
    .exact_match = 8,
    .check = pim_encapcheck,
    .input = pim_input
};

/*
 * Note: the PIM Register encapsulation adds the following in front of a
 * data packet:
 *
 * struct pim_encap_hdr {
 *     struct ip ip;
 *     struct pim_encap_pimhdr pim;
 * }
 *
 */
struct pim_encap_pimhdr {
    struct pim pim;
    uint32_t flags;
};
#define PIM_ENCAP_TTL 64

static struct ip pim_encap_iphdr = {
#if BYTE_ORDER == LITTLE_ENDIAN
    sizeof(struct ip) >> 2,
    IPVERSION,
#else
    IPVERSION,
    sizeof(struct ip) >> 2,
#endif
    0,				/* tos */
    sizeof(struct ip),		/* total length */
    0,				/* id */
    0,				/* frag offset */
    PIM_ENCAP_TTL,
    IPPROTO_PIM,
    0,				/* checksum */
};

static struct pim_encap_pimhdr pim_encap_pimhdr = {
    {
	PIM_MAKE_VT(PIM_VERSION, PIM_REGISTER), /* PIM vers and message type */
	0,			/* reserved */
	0,			/* checksum */
    },
    0				/* flags */
};

VNET_DEFINE_STATIC(vifi_t, reg_vif_num) = VIFI_INVALID;
#define V_reg_vif_num VNET(reg_vif_num)
VNET_DEFINE_STATIC(struct ifnet, multicast_register_if);
#define V_multicast_register_if VNET(multicast_register_if)

/*
 * Private variables.
 */
static u_long X_ip_mcast_src(int);
static int X_ip_mforward(struct ip *, struct ifnet *, struct mbuf *,
    struct ip_moptions *);
static int X_ip_mrouter_done(void);
static int X_ip_mrouter_get(struct socket *, struct sockopt *);
static int X_ip_mrouter_set(struct socket *, struct sockopt *);
static int X_legal_vif_num(int);
static int X_mrt_ioctl(u_long, caddr_t, int);

static int add_bw_upcall(struct bw_upcall *);
static int add_mfc(struct mfcctl2 *);
static int add_vif(struct vifctl *);
static void bw_meter_prepare_upcall(struct bw_meter *, struct timeval *);
static void bw_meter_process(void);
static void bw_meter_receive_packet(struct bw_meter *, int,
    struct timeval *);
static void bw_upcalls_send(void);
static int del_bw_upcall(struct bw_upcall *);
static int del_mfc(struct mfcctl2 *);
static int del_vif(vifi_t);
static int del_vif_locked(vifi_t);
static void expire_bw_meter_process(void *);
static void expire_bw_upcalls_send(void *);
static void expire_mfc(struct mfc *);
static void expire_upcalls(void *);
static void free_bw_list(struct bw_meter *);
static int get_sg_cnt(struct sioc_sg_req *);
static int get_vif_cnt(struct sioc_vif_req *);
static void if_detached_event(void *, struct ifnet *);
static int ip_mdq(struct mbuf *, struct ifnet *, struct mfc *, vifi_t);
static int ip_mrouter_init(struct socket *, int);
static __inline struct mfc *
    mfc_find(struct in_addr *, struct in_addr *);
static void phyint_send(struct ip *, struct vif *, struct mbuf *);
static struct mbuf *
    pim_register_prepare(struct ip *, struct mbuf *);
static int pim_register_send(struct ip *, struct vif *,
    struct mbuf *, struct mfc *);
static int pim_register_send_rp(struct ip *, struct vif *,
    struct mbuf *, struct mfc *);
static int pim_register_send_upcall(struct ip *, struct vif *,
    struct mbuf *, struct mfc *);
static void schedule_bw_meter(struct bw_meter *, struct timeval *);
static void send_packet(struct vif *, struct mbuf *);
static int set_api_config(uint32_t *);
static int set_assert(int);
static int socket_send(struct socket *, struct mbuf *,
    struct sockaddr_in *);
static void unschedule_bw_meter(struct bw_meter *);

/*
 * Kernel multicast forwarding API capabilities and setup.
 * If more API capabilities are added to the kernel, they should be
 * recorded in `mrt_api_support'.
 */
#define MRT_API_VERSION 0x0305

static const int mrt_api_version = MRT_API_VERSION;
static const uint32_t mrt_api_support = (MRT_MFC_FLAGS_DISABLE_WRONGVIF |
    MRT_MFC_FLAGS_BORDER_VIF |
    MRT_MFC_RP |
    MRT_MFC_BW_UPCALL);

VNET_DEFINE_STATIC(uint32_t, mrt_api_config);
#define V_mrt_api_config VNET(mrt_api_config)
VNET_DEFINE_STATIC(int, pim_assert_enabled);
#define V_pim_assert_enabled VNET(pim_assert_enabled)
static struct timeval pim_assert_interval = { 3, 0 };	/* Rate limit */

/*
 * Find a route for a given origin IP address and multicast group address.
 * Statistics must be updated by the caller.
 */
static __inline struct mfc *
mfc_find(struct in_addr *o, struct in_addr *g)
{
    struct mfc *rt;

    MFC_LOCK_ASSERT();

    LIST_FOREACH(rt, &V_mfchashtbl[MFCHASH(*o, *g)], mfc_hash) {
        if (in_hosteq(rt->mfc_origin, *o) &&
            in_hosteq(rt->mfc_mcastgrp, *g) &&
            TAILQ_EMPTY(&rt->mfc_stall))
            break;
    }

    return (rt);
}

/*
 * Handle MRT setsockopt commands to modify the multicast forwarding tables.
 */
static int
X_ip_mrouter_set(struct socket *so, struct sockopt *sopt)
{
    int error, optval;
    vifi_t vifi;
    struct vifctl vifc;
    struct mfcctl2 mfc;
    struct bw_upcall bw_upcall;
    uint32_t i;

    if (so != V_ip_mrouter && sopt->sopt_name != MRT_INIT)
        return EPERM;

    error = 0;
    switch (sopt->sopt_name) {
    case MRT_INIT:
        error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
        if (error)
            break;
        error = ip_mrouter_init(so, optval);
        break;

    case MRT_DONE:
        error = ip_mrouter_done();
        break;

    case MRT_ADD_VIF:
        error = sooptcopyin(sopt, &vifc, sizeof vifc, sizeof vifc);
        if (error)
            break;
        error = add_vif(&vifc);
        break;

    case MRT_DEL_VIF:
        error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
        if (error)
            break;
        error = del_vif(vifi);
        break;

    case MRT_ADD_MFC:
    case MRT_DEL_MFC:
        /*
         * select data size depending on API version.
         */
        if (sopt->sopt_name == MRT_ADD_MFC &&
            V_mrt_api_config & MRT_API_FLAGS_ALL) {
            error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl2),
                sizeof(struct mfcctl2));
        } else {
            error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl),
                sizeof(struct mfcctl));
            bzero((caddr_t)&mfc + sizeof(struct mfcctl),
                sizeof(mfc) - sizeof(struct mfcctl));
        }
        if (error)
            break;
        if (sopt->sopt_name == MRT_ADD_MFC)
            error = add_mfc(&mfc);
        else
            error = del_mfc(&mfc);
        break;

    case MRT_ASSERT:
        error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
        if (error)
            break;
        set_assert(optval);
        break;

    case MRT_API_CONFIG:
        error = sooptcopyin(sopt, &i, sizeof i, sizeof i);
        if (!error)
            error = set_api_config(&i);
        if (!error)
            error = sooptcopyout(sopt, &i, sizeof i);
        break;

    case MRT_ADD_BW_UPCALL:
    case MRT_DEL_BW_UPCALL:
        error = sooptcopyin(sopt, &bw_upcall, sizeof bw_upcall,
            sizeof bw_upcall);
        if (error)
            break;
        if (sopt->sopt_name == MRT_ADD_BW_UPCALL)
            error = add_bw_upcall(&bw_upcall);
        else
            error = del_bw_upcall(&bw_upcall);
        break;

    default:
        error = EOPNOTSUPP;
        break;
    }
    return error;
}

/*
 * Handle MRT getsockopt commands
 */
static int
X_ip_mrouter_get(struct socket *so, struct sockopt *sopt)
{
    int error;

    switch (sopt->sopt_name) {
    case MRT_VERSION:
        error = sooptcopyout(sopt, &mrt_api_version, sizeof mrt_api_version);
        break;

    case MRT_ASSERT:
        error = sooptcopyout(sopt, &V_pim_assert_enabled,
            sizeof V_pim_assert_enabled);
        break;

    case MRT_API_SUPPORT:
        error = sooptcopyout(sopt, &mrt_api_support, sizeof mrt_api_support);
        break;

    case MRT_API_CONFIG:
        error = sooptcopyout(sopt, &V_mrt_api_config, sizeof V_mrt_api_config);
        break;

    default:
        error = EOPNOTSUPP;
        break;
    }
    return error;
}

/*
 * Handle ioctl commands to obtain information from the cache
 */
static int
X_mrt_ioctl(u_long cmd, caddr_t data, int fibnum __unused)
{
    int error = 0;

    /*
     * Currently the only function calling this ioctl routine is rtioctl_fib().
     * Typically, only root can create the raw socket in order to execute
     * this ioctl method, however the request might be coming from a prison
     */
    error = priv_check(curthread, PRIV_NETINET_MROUTE);
    if (error)
        return (error);

    switch (cmd) {
    case (SIOCGETVIFCNT):
        error = get_vif_cnt((struct sioc_vif_req *)data);
        break;

    case (SIOCGETSGCNT):
        error = get_sg_cnt((struct sioc_sg_req *)data);
        break;

    default:
        error = EINVAL;
        break;
    }
    return error;
}

/*
 * returns the packet, byte, rpf-failure count for the source group provided
 */
static int
get_sg_cnt(struct sioc_sg_req *req)
{
    struct mfc *rt;

    MFC_LOCK();
    rt = mfc_find(&req->src, &req->grp);
    if (rt == NULL) {
        MFC_UNLOCK();
        req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff;
        return EADDRNOTAVAIL;
    }
    req->pktcnt = rt->mfc_pkt_cnt;
    req->bytecnt = rt->mfc_byte_cnt;
    req->wrong_if = rt->mfc_wrong_if;
    MFC_UNLOCK();
    return 0;
}

/*
 * returns the input and output packet and byte counts on the vif provided
 */
static int
get_vif_cnt(struct sioc_vif_req *req)
{
    vifi_t vifi = req->vifi;

    VIF_LOCK();
    if (vifi >= V_numvifs) {
        VIF_UNLOCK();
        return EINVAL;
    }

    req->icount = V_viftable[vifi].v_pkt_in;
    req->ocount = V_viftable[vifi].v_pkt_out;
    req->ibytes = V_viftable[vifi].v_bytes_in;
    req->obytes = V_viftable[vifi].v_bytes_out;
    VIF_UNLOCK();

    return 0;
}
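
/*
 * ifnet departure handler: when an interface goes away, tear down any vif
 * bound to it and expire the MFC entries whose parent is that vif.
 */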
static void
if_detached_event(void *arg __unused, struct ifnet *ifp)
{
    vifi_t vifi;
    u_long i;

    MROUTER_LOCK();

    if (V_ip_mrouter == NULL) {
        MROUTER_UNLOCK();
        return;
    }

    VIF_LOCK();
    MFC_LOCK();

    /*
     * Tear down multicast forwarder state associated with this ifnet.
     * 1. Walk the vif list, matching vifs against this ifnet.
     * 2. Walk the multicast forwarding cache (mfc) looking for
     *    inner matches with this vif's index.
     * 3. Expire any matching multicast forwarding cache entries.
     * 4. Free vif state. This should disable ALLMULTI on the interface.
     */
    for (vifi = 0; vifi < V_numvifs; vifi++) {
        if (V_viftable[vifi].v_ifp != ifp)
            continue;
        for (i = 0; i < mfchashsize; i++) {
            struct mfc *rt, *nrt;

            LIST_FOREACH_SAFE(rt, &V_mfchashtbl[i], mfc_hash, nrt) {
                if (rt->mfc_parent == vifi) {
                    expire_mfc(rt);
                }
            }
        }
        del_vif_locked(vifi);
    }

    MFC_UNLOCK();
    VIF_UNLOCK();
    MROUTER_UNLOCK();
}

/*
 * Enable multicast forwarding.
 */
static int
ip_mrouter_init(struct socket *so, int version)
{
    CTR3(KTR_IPMF, "%s: so_type %d, pr_protocol %d", __func__,
        so->so_type, so->so_proto->pr_protocol);

    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_IGMP)
        return EOPNOTSUPP;

    if (version != 1)
        return ENOPROTOOPT;

    MROUTER_LOCK();

    if (ip_mrouter_unloading) {
        MROUTER_UNLOCK();
        return ENOPROTOOPT;
    }

    if (V_ip_mrouter != NULL) {
        MROUTER_UNLOCK();
        return EADDRINUSE;
    }

    V_mfchashtbl = hashinit_flags(mfchashsize, M_MRTABLE, &V_mfchash,
        HASH_NOWAIT);

    callout_reset(&V_expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls,
        curvnet);
    callout_reset(&V_bw_upcalls_ch, BW_UPCALLS_PERIOD, expire_bw_upcalls_send,
        curvnet);
    callout_reset(&V_bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process,
        curvnet);

    V_ip_mrouter = so;
    ip_mrouter_cnt++;

    MROUTER_UNLOCK();

    CTR1(KTR_IPMF, "%s: done", __func__);

    return 0;
}

/*
 * Disable multicast forwarding.
 */
static int
X_ip_mrouter_done(void)
{
    struct ifnet *ifp;
    u_long i;
    vifi_t vifi;

    MROUTER_LOCK();

    if (V_ip_mrouter == NULL) {
        MROUTER_UNLOCK();
        return EINVAL;
    }
    /*
     * Detach/disable hooks to the rest of the system.
     */
    V_ip_mrouter = NULL;
    ip_mrouter_cnt--;
    V_mrt_api_config = 0;

    VIF_LOCK();

    /*
     * For each phyint in use, disable promiscuous reception of all IP
     * multicasts.
     */
    for (vifi = 0; vifi < V_numvifs; vifi++) {
        if (!in_nullhost(V_viftable[vifi].v_lcl_addr) &&
            !(V_viftable[vifi].v_flags & (VIFF_TUNNEL | VIFF_REGISTER))) {
            ifp = V_viftable[vifi].v_ifp;
            if_allmulti(ifp, 0);
        }
    }
    bzero((caddr_t)V_viftable, sizeof(*V_viftable) * MAXVIFS);
    V_numvifs = 0;
    V_pim_assert_enabled = 0;

    VIF_UNLOCK();

    callout_stop(&V_expire_upcalls_ch);
    callout_stop(&V_bw_upcalls_ch);
    callout_stop(&V_bw_meter_ch);

    MFC_LOCK();

    /*
     * Free all multicast forwarding cache entries.
     * Do not use hashdestroy(), as we must perform other cleanup.
     */
    for (i = 0; i < mfchashsize; i++) {
        struct mfc *rt, *nrt;

        LIST_FOREACH_SAFE(rt, &V_mfchashtbl[i], mfc_hash, nrt) {
            expire_mfc(rt);
        }
    }
    free(V_mfchashtbl, M_MRTABLE);
    V_mfchashtbl = NULL;

    bzero(V_nexpire, sizeof(V_nexpire[0]) * mfchashsize);

    V_bw_upcalls_n = 0;
    bzero(V_bw_meter_timers, BW_METER_BUCKETS * sizeof(*V_bw_meter_timers));

    MFC_UNLOCK();

    V_reg_vif_num = VIFI_INVALID;

    MROUTER_UNLOCK();

    CTR1(KTR_IPMF, "%s: done", __func__);

    return 0;
}

/*
 * Set PIM assert processing global
 */
static int
set_assert(int i)
{
    if ((i != 1) && (i != 0))
        return EINVAL;

    V_pim_assert_enabled = i;

    return 0;
}

/*
 * Configure API capabilities
 */
int
set_api_config(uint32_t *apival)
{
    u_long i;

    /*
     * We can set the API capabilities only if it is the first operation
     * after MRT_INIT. I.e.:
     *  - there are no vifs installed
     *  - pim_assert is not enabled
     *  - the MFC table is empty
     */
    if (V_numvifs > 0) {
        *apival = 0;
        return EPERM;
    }
    if (V_pim_assert_enabled) {
        *apival = 0;
        return EPERM;
    }

    MFC_LOCK();

    for (i = 0; i < mfchashsize; i++) {
        if (LIST_FIRST(&V_mfchashtbl[i]) != NULL) {
            MFC_UNLOCK();
            *apival = 0;
            return EPERM;
        }
    }

    MFC_UNLOCK();

    V_mrt_api_config = *apival & mrt_api_support;
    *apival = V_mrt_api_config;

    return 0;
}

/*
 * Add a vif to the vif table
 */
static int
add_vif(struct vifctl *vifcp)
{
    struct vif *vifp = V_viftable + vifcp->vifc_vifi;
    struct sockaddr_in sin = {sizeof sin, AF_INET};
    struct ifaddr *ifa;
    struct ifnet *ifp;
    int error;

    VIF_LOCK();
    if (vifcp->vifc_vifi >= MAXVIFS) {
        VIF_UNLOCK();
        return EINVAL;
    }
    /* rate limiting is no longer supported by this code */
    if (vifcp->vifc_rate_limit != 0) {
        log(LOG_ERR, "rate limiting is no longer supported\n");
        VIF_UNLOCK();
        return EINVAL;
    }
    if (!in_nullhost(vifp->v_lcl_addr)) {
        VIF_UNLOCK();
        return EADDRINUSE;
    }
    if (in_nullhost(vifcp->vifc_lcl_addr)) {
        VIF_UNLOCK();
        return EADDRNOTAVAIL;
    }

    /* Find the interface with an address in AF_INET family */
    if (vifcp->vifc_flags & VIFF_REGISTER) {
        /*
         * XXX: Because VIFF_REGISTER does not really need a valid
         * local interface (e.g. it could be 127.0.0.2), we don't
         * check its address.
         */
        ifp = NULL;
    } else {
        struct epoch_tracker et;

        sin.sin_addr = vifcp->vifc_lcl_addr;
        NET_EPOCH_ENTER(et);
        ifa = ifa_ifwithaddr((struct sockaddr *)&sin);
        if (ifa == NULL) {
            NET_EPOCH_EXIT(et);
            VIF_UNLOCK();
            return EADDRNOTAVAIL;
        }
        ifp = ifa->ifa_ifp;
        /* XXX FIXME we need to take a ref on ifp and cleanup properly! */
        NET_EPOCH_EXIT(et);
    }

    if ((vifcp->vifc_flags & VIFF_TUNNEL) != 0) {
        CTR1(KTR_IPMF, "%s: tunnels are no longer supported", __func__);
        VIF_UNLOCK();
        return EOPNOTSUPP;
    } else if (vifcp->vifc_flags & VIFF_REGISTER) {
        ifp = &V_multicast_register_if;
        CTR2(KTR_IPMF, "%s: add register vif for ifp %p", __func__, ifp);
        if (V_reg_vif_num == VIFI_INVALID) {
            if_initname(&V_multicast_register_if, "register_vif", 0);
            V_multicast_register_if.if_flags = IFF_LOOPBACK;
            V_reg_vif_num = vifcp->vifc_vifi;
        }
    } else {	/* Make sure the interface supports multicast */
        if ((ifp->if_flags & IFF_MULTICAST) == 0) {
            VIF_UNLOCK();
            return EOPNOTSUPP;
        }
        /* Enable promiscuous reception of all IP multicasts from the if */
        error = if_allmulti(ifp, 1);
        if (error) {
            VIF_UNLOCK();
            return error;
        }
    }

    vifp->v_flags = vifcp->vifc_flags;
    vifp->v_threshold = vifcp->vifc_threshold;
    vifp->v_lcl_addr = vifcp->vifc_lcl_addr;
    vifp->v_rmt_addr = vifcp->vifc_rmt_addr;
    vifp->v_ifp = ifp;

    /* initialize per vif pkt counters */
    vifp->v_pkt_in = 0;
    vifp->v_pkt_out = 0;
    vifp->v_bytes_in = 0;
    vifp->v_bytes_out = 0;

    /* Adjust numvifs up if the vifi is higher than numvifs */
    if (V_numvifs <= vifcp->vifc_vifi)
        V_numvifs = vifcp->vifc_vifi + 1;

    VIF_UNLOCK();

    CTR4(KTR_IPMF, "%s: add vif %d laddr 0x%08x thresh %x", __func__,
        (int)vifcp->vifc_vifi, ntohl(vifcp->vifc_lcl_addr.s_addr),
        (int)vifcp->vifc_threshold);

    return 0;
}

/*
 * Delete a vif from the vif table
 */
static int
del_vif_locked(vifi_t vifi)
{
    struct vif *vifp;

    VIF_LOCK_ASSERT();

    if (vifi >= V_numvifs) {
        return EINVAL;
    }
    vifp = &V_viftable[vifi];
    if (in_nullhost(vifp->v_lcl_addr)) {
        return EADDRNOTAVAIL;
    }

    if (!(vifp->v_flags & (VIFF_TUNNEL | VIFF_REGISTER)))
        if_allmulti(vifp->v_ifp, 0);

    if (vifp->v_flags & VIFF_REGISTER)
        V_reg_vif_num = VIFI_INVALID;

    bzero((caddr_t)vifp, sizeof (*vifp));

    CTR2(KTR_IPMF, "%s: delete vif %d", __func__, (int)vifi);

    /* Adjust numvifs down */
    for (vifi = V_numvifs; vifi > 0; vifi--)
        if (!in_nullhost(V_viftable[vifi-1].v_lcl_addr))
            break;
    V_numvifs = vifi;

    return 0;
}
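
/*
 * Locked wrapper around del_vif_locked(): take the VIF lock, delete the
 * vif, and hand the result back to the caller.
 */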
static int
del_vif(vifi_t vifi)
{
    int cc;

    VIF_LOCK();
    cc = del_vif_locked(vifi);
    VIF_UNLOCK();

    return cc;
}

/*
 * update an mfc entry without resetting counters and S,G addresses.
 */
static void
update_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
{
    int i;

    rt->mfc_parent = mfccp->mfcc_parent;
    for (i = 0; i < V_numvifs; i++) {
        rt->mfc_ttls[i] = mfccp->mfcc_ttls[i];
        rt->mfc_flags[i] = mfccp->mfcc_flags[i] & V_mrt_api_config &
            MRT_MFC_FLAGS_ALL;
    }
    /* set the RP address */
    if (V_mrt_api_config & MRT_MFC_RP)
        rt->mfc_rp = mfccp->mfcc_rp;
    else
        rt->mfc_rp.s_addr = INADDR_ANY;
}

/*
 * fully initialize an mfc entry from the parameter.
 */
static void
init_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
{
    rt->mfc_origin = mfccp->mfcc_origin;
    rt->mfc_mcastgrp = mfccp->mfcc_mcastgrp;

    update_mfc_params(rt, mfccp);

    /* initialize pkt counters per src-grp */
    rt->mfc_pkt_cnt = 0;
    rt->mfc_byte_cnt = 0;
    rt->mfc_wrong_if = 0;
    timevalclear(&rt->mfc_last_assert);
}
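
/*
 * Release a forwarding cache entry: free its bandwidth meter list, drop any
 * packets still queued on its stall list, then unhash and free the entry.
 * Called with the MFC lock held.
 */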
  876. static void
  877. expire_mfc(struct mfc *rt)
  878. {
  879. struct rtdetq *rte, *nrte;
  880. MFC_LOCK_ASSERT();
  881. free_bw_list(rt->mfc_bw_meter);
  882. TAILQ_FOREACH_SAFE(rte, &rt->mfc_stall, rte_link, nrte) {
  883. m_freem(rte->m);
  884. TAILQ_REMOVE(&rt->mfc_stall, rte, rte_link);
  885. free(rte, M_MRTABLE);
  886. }
  887. LIST_REMOVE(rt, mfc_hash);
  888. free(rt, M_MRTABLE);
  889. }
  890. /*
  891. * Add an mfc entry
  892. */
  893. static int
  894. add_mfc(struct mfcctl2 *mfccp)
  895. {
  896. struct mfc *rt;
  897. struct rtdetq *rte, *nrte;
  898. u_long hash = 0;
  899. u_short nstl;
  900. VIF_LOCK();
  901. MFC_LOCK();
  902. rt = mfc_find(&mfccp->mfcc_origin, &mfccp->mfcc_mcastgrp);
  903. /* If an entry already exists, just update the fields */
  904. if (rt) {
  905. CTR4(KTR_IPMF, "%s: update mfc orig 0x%08x group %lx parent %x",
  906. __func__, ntohl(mfccp->mfcc_origin.s_addr),
  907. (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
  908. mfccp->mfcc_parent);
  909. update_mfc_params(rt, mfccp);
  910. MFC_UNLOCK();
  911. VIF_UNLOCK();
  912. return (0);
  913. }
  914. /*
  915. * Find the entry for which the upcall was made and update
  916. */
  917. nstl = 0;
  918. hash = MFCHASH(mfccp->mfcc_origin, mfccp->mfcc_mcastgrp);
  919. LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
  920. if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
  921. in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp) &&
  922. !TAILQ_EMPTY(&rt->mfc_stall)) {
  923. CTR5(KTR_IPMF,
  924. "%s: add mfc orig 0x%08x group %lx parent %x qh %p",
  925. __func__, ntohl(mfccp->mfcc_origin.s_addr),
  926. (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
  927. mfccp->mfcc_parent,
  928. TAILQ_FIRST(&rt->mfc_stall));
  929. if (nstl++)
  930. CTR1(KTR_IPMF, "%s: multiple matches", __func__);
  931. init_mfc_params(rt, mfccp);
  932. rt->mfc_expire = 0; /* Don't clean this guy up */
  933. V_nexpire[hash]--;
  934. /* Free queued packets, but attempt to forward them first. */
  935. TAILQ_FOREACH_SAFE(rte, &rt->mfc_stall, rte_link, nrte) {
  936. if (rte->ifp != NULL)
  937. ip_mdq(rte->m, rte->ifp, rt, -1);
  938. m_freem(rte->m);
  939. TAILQ_REMOVE(&rt->mfc_stall, rte, rte_link);
  940. rt->mfc_nstall--;
  941. free(rte, M_MRTABLE);
  942. }
  943. }
  944. }
  945. /*
  946. * It is possible that an entry is being inserted without an upcall
  947. */
  948. if (nstl == 0) {
  949. CTR1(KTR_IPMF, "%s: adding mfc w/o upcall", __func__);
  950. LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
  951. if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
  952. in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp)) {
  953. init_mfc_params(rt, mfccp);
  954. if (rt->mfc_expire)
  955. V_nexpire[hash]--;
  956. rt->mfc_expire = 0;
  957. break; /* XXX */
  958. }
  959. }
  960. if (rt == NULL) { /* no upcall, so make a new entry */
  961. rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
  962. if (rt == NULL) {
  963. MFC_UNLOCK();
  964. VIF_UNLOCK();
  965. return (ENOBUFS);
  966. }
  967. init_mfc_params(rt, mfccp);
  968. TAILQ_INIT(&rt->mfc_stall);
  969. rt->mfc_nstall = 0;
  970. rt->mfc_expire = 0;
  971. rt->mfc_bw_meter = NULL;
  972. /* insert new entry at head of hash chain */
  973. LIST_INSERT_HEAD(&V_mfchashtbl[hash], rt, mfc_hash);
  974. }
  975. }
  976. MFC_UNLOCK();
  977. VIF_UNLOCK();
  978. return (0);
  979. }
  980. /*
  981. * Delete an mfc entry
  982. */
  983. static int
  984. del_mfc(struct mfcctl2 *mfccp)
  985. {
  986. struct in_addr origin;
  987. struct in_addr mcastgrp;
  988. struct mfc *rt;
  989. origin = mfccp->mfcc_origin;
  990. mcastgrp = mfccp->mfcc_mcastgrp;
  991. CTR3(KTR_IPMF, "%s: delete mfc orig 0x%08x group %lx", __func__,
  992. ntohl(origin.s_addr), (u_long)ntohl(mcastgrp.s_addr));
  993. MFC_LOCK();
  994. rt = mfc_find(&origin, &mcastgrp);
  995. if (rt == NULL) {
  996. MFC_UNLOCK();
  997. return EADDRNOTAVAIL;
  998. }
  999. /*
  1000. * free the bw_meter entries
  1001. */
  1002. free_bw_list(rt->mfc_bw_meter);
  1003. rt->mfc_bw_meter = NULL;
  1004. LIST_REMOVE(rt, mfc_hash);
  1005. free(rt, M_MRTABLE);
  1006. MFC_UNLOCK();
  1007. return (0);
  1008. }
  1009. /*
  1010. * Send a message to the routing daemon on the multicast routing socket.
  1011. */
  1012. static int
  1013. socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src)
  1014. {
  1015. if (s) {
  1016. SOCKBUF_LOCK(&s->so_rcv);
  1017. if (sbappendaddr_locked(&s->so_rcv, (struct sockaddr *)src, mm,
  1018. NULL) != 0) {
  1019. sorwakeup_locked(s);
  1020. return 0;
  1021. }
  1022. SOCKBUF_UNLOCK(&s->so_rcv);
  1023. }
  1024. m_freem(mm);
  1025. return -1;
  1026. }
  1027. /*
  1028. * IP multicast forwarding function. This function assumes that the packet
  1029. * pointed to by "ip" has arrived on (or is about to be sent to) the interface
  1030. * pointed to by "ifp", and the packet is to be relayed to other networks
  1031. * that have members of the packet's destination IP multicast group.
  1032. *
  1033. * The packet is returned unscathed to the caller, unless it is
  1034. * erroneous, in which case a non-zero return value tells the caller to
  1035. * discard it.
  1036. */
  1037. #define TUNNEL_LEN 12 /* # bytes of IP option for tunnel encapsulation */
  1038. static int
  1039. X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m,
  1040. struct ip_moptions *imo)
  1041. {
  1042. struct mfc *rt;
  1043. int error;
  1044. vifi_t vifi;
  1045. CTR3(KTR_IPMF, "ip_mforward: delete mfc orig 0x%08x group %lx ifp %p",
  1046. ntohl(ip->ip_src.s_addr), (u_long)ntohl(ip->ip_dst.s_addr), ifp);
  1047. if (ip->ip_hl < (sizeof(struct ip) + TUNNEL_LEN) >> 2 ||
  1048. ((u_char *)(ip + 1))[1] != IPOPT_LSRR ) {
  1049. /*
  1050. * Packet arrived via a physical interface or
  1051. * an encapsulated tunnel or a register_vif.
  1052. */
  1053. } else {
  1054. /*
  1055. * Packet arrived through a source-route tunnel.
  1056. * Source-route tunnels are no longer supported.
  1057. */
  1058. return (1);
  1059. }
  1060. VIF_LOCK();
  1061. MFC_LOCK();
  1062. if (imo && ((vifi = imo->imo_multicast_vif) < V_numvifs)) {
  1063. if (ip->ip_ttl < MAXTTL)
  1064. ip->ip_ttl++; /* compensate for -1 in *_send routines */
  1065. error = ip_mdq(m, ifp, NULL, vifi);
  1066. MFC_UNLOCK();
  1067. VIF_UNLOCK();
  1068. return error;
  1069. }
  1070. /*
  1071. * Don't forward a packet with time-to-live of zero or one,
  1072. * or a packet destined to a local-only group.
  1073. */
  1074. if (ip->ip_ttl <= 1 || IN_LOCAL_GROUP(ntohl(ip->ip_dst.s_addr))) {
  1075. MFC_UNLOCK();
  1076. VIF_UNLOCK();
  1077. return 0;
  1078. }
  1079. /*
  1080. * Determine forwarding vifs from the forwarding cache table
  1081. */
  1082. MRTSTAT_INC(mrts_mfc_lookups);
  1083. rt = mfc_find(&ip->ip_src, &ip->ip_dst);
  1084. /* Entry exists, so forward if necessary */
  1085. if (rt != NULL) {
  1086. error = ip_mdq(m, ifp, rt, -1);
  1087. MFC_UNLOCK();
  1088. VIF_UNLOCK();
  1089. return error;
  1090. } else {
  1091. /*
  1092. * If we don't have a route for packet's origin,
  1093. * Make a copy of the packet & send message to routing daemon
  1094. */
  1095. struct mbuf *mb0;
  1096. struct rtdetq *rte;
  1097. u_long hash;
  1098. int hlen = ip->ip_hl << 2;
  1099. MRTSTAT_INC(mrts_mfc_misses);
  1100. MRTSTAT_INC(mrts_no_route);
  1101. CTR2(KTR_IPMF, "ip_mforward: no mfc for (0x%08x,%lx)",
  1102. ntohl(ip->ip_src.s_addr), (u_long)ntohl(ip->ip_dst.s_addr));
  1103. /*
  1104. * Allocate mbufs early so that we don't do extra work if we are
  1105. * just going to fail anyway. Make sure to pullup the header so
  1106. * that other people can't step on it.
  1107. */
  1108. rte = (struct rtdetq *)malloc((sizeof *rte), M_MRTABLE,
  1109. M_NOWAIT|M_ZERO);
  1110. if (rte == NULL) {
  1111. MFC_UNLOCK();
  1112. VIF_UNLOCK();
  1113. return ENOBUFS;
  1114. }
  1115. mb0 = m_copypacket(m, M_NOWAIT);
  1116. if (mb0 && (!M_WRITABLE(mb0) || mb0->m_len < hlen))
  1117. mb0 = m_pullup(mb0, hlen);
  1118. if (mb0 == NULL) {
  1119. free(rte, M_MRTABLE);
  1120. MFC_UNLOCK();
  1121. VIF_UNLOCK();
  1122. return ENOBUFS;
  1123. }
  1124. /* is there an upcall waiting for this flow ? */
  1125. hash = MFCHASH(ip->ip_src, ip->ip_dst);
  1126. LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
  1127. if (in_hosteq(ip->ip_src, rt->mfc_origin) &&
  1128. in_hosteq(ip->ip_dst, rt->mfc_mcastgrp) &&
  1129. !TAILQ_EMPTY(&rt->mfc_stall))
  1130. break;
  1131. }
  1132. if (rt == NULL) {
  1133. int i;
  1134. struct igmpmsg *im;
  1135. struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
  1136. struct mbuf *mm;
  1137. /*
  1138. * Locate the vifi for the incoming interface for this packet.
  1139. * If none found, drop packet.
  1140. */
  1141. for (vifi = 0; vifi < V_numvifs &&
  1142. V_viftable[vifi].v_ifp != ifp; vifi++)
  1143. ;
  1144. if (vifi >= V_numvifs) /* vif not found, drop packet */
  1145. goto non_fatal;
  1146. /* no upcall, so make a new entry */
  1147. rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
  1148. if (rt == NULL)
  1149. goto fail;
  1150. /* Make a copy of the header to send to the user level process */
  1151. mm = m_copym(mb0, 0, hlen, M_NOWAIT);
  1152. if (mm == NULL)
  1153. goto fail1;
  1154. /*
  1155. * Send message to routing daemon to install
  1156. * a route into the kernel table
  1157. */
  1158. im = mtod(mm, struct igmpmsg *);
  1159. im->im_msgtype = IGMPMSG_NOCACHE;
  1160. im->im_mbz = 0;
  1161. im->im_vif = vifi;
  1162. MRTSTAT_INC(mrts_upcalls);
  1163. k_igmpsrc.sin_addr = ip->ip_src;
  1164. if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) {
  1165. CTR0(KTR_IPMF, "ip_mforward: socket queue full");
  1166. MRTSTAT_INC(mrts_upq_sockfull);
  1167. fail1:
  1168. free(rt, M_MRTABLE);
  1169. fail:
  1170. free(rte, M_MRTABLE);
  1171. m_freem(mb0);
  1172. MFC_UNLOCK();
  1173. VIF_UNLOCK();
  1174. return ENOBUFS;
  1175. }
  1176. /* insert new entry at head of hash chain */
  1177. rt->mfc_origin.s_addr = ip->ip_src.s_addr;
  1178. rt->mfc_mcastgrp.s_addr = ip->ip_dst.s_addr;
  1179. rt->mfc_expire = UPCALL_EXPIRE;
  1180. V_nexpire[hash]++;
  1181. for (i = 0; i < V_numvifs; i++) {
  1182. rt->mfc_ttls[i] = 0;
  1183. rt->mfc_flags[i] = 0;
  1184. }
  1185. rt->mfc_parent = -1;
  1186. /* clear the RP address */
  1187. rt->mfc_rp.s_addr = INADDR_ANY;
  1188. rt->mfc_bw_meter = NULL;
  1189. /* initialize pkt counters per src-grp */
  1190. rt->mfc_pkt_cnt = 0;
  1191. rt->mfc_byte_cnt = 0;
  1192. rt->mfc_wrong_if = 0;
  1193. timevalclear(&rt->mfc_last_assert);
  1194. TAILQ_INIT(&rt->mfc_stall);
  1195. rt->mfc_nstall = 0;
  1196. /* link into table */
  1197. LIST_INSERT_HEAD(&V_mfchashtbl[hash], rt, mfc_hash);
  1198. TAILQ_INSERT_HEAD(&rt->mfc_stall, rte, rte_link);
  1199. rt->mfc_nstall++;
  1200. } else {
  1201. /* determine if queue has overflowed */
  1202. if (rt->mfc_nstall > MAX_UPQ) {
  1203. MRTSTAT_INC(mrts_upq_ovflw);
  1204. non_fatal:
  1205. free(rte, M_MRTABLE);
  1206. m_freem(mb0);
  1207. MFC_UNLOCK();
  1208. VIF_UNLOCK();
  1209. return (0);
  1210. }
  1211. TAILQ_INSERT_TAIL(&rt->mfc_stall, rte, rte_link);
  1212. rt->mfc_nstall++;
  1213. }
  1214. rte->m = mb0;
  1215. rte->ifp = ifp;
  1216. MFC_UNLOCK();
  1217. VIF_UNLOCK();
  1218. return 0;
  1219. }
  1220. }
  1221. /*
  1222. * Clean up the cache entry if upcall is not serviced
  1223. */
  1224. static void
  1225. expire_upcalls(void *arg)
  1226. {
  1227. u_long i;
  1228. CURVNET_SET((struct vnet *) arg);
  1229. MFC_LOCK();
  1230. for (i = 0; i < mfchashsize; i++) {
  1231. struct mfc *rt, *nrt;
  1232. if (V_nexpire[i] == 0)
  1233. continue;
  1234. LIST_FOREACH_SAFE(rt, &V_mfchashtbl[i], mfc_hash, nrt) {
  1235. if (TAILQ_EMPTY(&rt->mfc_stall))
  1236. continue;
  1237. if (rt->mfc_expire == 0 || --rt->mfc_expire > 0)
  1238. continue;
  1239. /*
  1240. * free the bw_meter entries
  1241. */
  1242. while (rt->mfc_bw_meter != NULL) {
  1243. struct bw_meter *x = rt->mfc_bw_meter;
  1244. rt->mfc_bw_meter = x->bm_mfc_next;
  1245. free(x, M_BWMETER);
  1246. }
  1247. MRTSTAT_INC(mrts_cache_cleanups);
  1248. CTR3(KTR_IPMF, "%s: expire (%lx, %lx)", __func__,
  1249. (u_long)ntohl(rt->mfc_origin.s_addr),
  1250. (u_long)ntohl(rt->mfc_mcastgrp.s_addr));
  1251. expire_mfc(rt);
  1252. }
  1253. }
  1254. MFC_UNLOCK();
  1255. callout_reset(&V_expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls,
  1256. curvnet);
  1257. CURVNET_RESTORE();
  1258. }
  1259. /*
  1260. * Packet forwarding routine once entry in the cache is made
  1261. */
  1262. static int
  1263. ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
  1264. {
  1265. struct ip *ip = mtod(m, struct ip *);
  1266. vifi_t vifi;
  1267. int plen = ntohs(ip->ip_len);
  1268. VIF_LOCK_ASSERT();
  1269. /*
  1270. * If xmt_vif is not -1, send on only the requested vif.
  1271. *
  1272. * (since vifi_t is u_short, -1 becomes MAXUSHORT, which > numvifs.)
  1273. */
  1274. if (xmt_vif < V_numvifs) {
  1275. if (V_viftable[xmt_vif].v_flags & VIFF_REGISTER)
  1276. pim_register_send(ip, V_viftable + xmt_vif, m, rt);
  1277. else
  1278. phyint_send(ip, V_viftable + xmt_vif, m);
  1279. return 1;
  1280. }
  1281. /*
  1282. * Don't forward if it didn't arrive from the parent vif for its origin.
  1283. */
  1284. vifi = rt->mfc_parent;
  1285. if ((vifi >= V_numvifs) || (V_viftable[vifi].v_ifp != ifp)) {
  1286. CTR4(KTR_IPMF, "%s: rx on wrong ifp %p (vifi %d, v_ifp %p)",
  1287. __func__, ifp, (int)vifi, V_viftable[vifi].v_ifp);
  1288. MRTSTAT_INC(mrts_wrong_if);
  1289. ++rt->mfc_wrong_if;
  1290. /*
  1291. * If we are doing PIM assert processing, send a message
  1292. * to the routing daemon.
  1293. *
  1294. * XXX: A PIM-SM router needs the WRONGVIF detection so it
  1295. * can complete the SPT switch, regardless of the type
  1296. * of the iif (broadcast media, GRE tunnel, etc).
  1297. */
  1298. if (V_pim_assert_enabled && (vifi < V_numvifs) &&
  1299. V_viftable[vifi].v_ifp) {
  1300. if (ifp == &V_multicast_register_if)
  1301. PIMSTAT_INC(pims_rcv_registers_wrongiif);
  1302. /* Get vifi for the incoming packet */
  1303. for (vifi = 0; vifi < V_numvifs && V_viftable[vifi].v_ifp != ifp;
  1304. vifi++)
  1305. ;
  1306. if (vifi >= V_numvifs)
  1307. return 0; /* The iif is not found: ignore the packet. */
  1308. if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_DISABLE_WRONGVIF)
  1309. return 0; /* WRONGVIF disabled: ignore the packet */
  1310. if (ratecheck(&rt->mfc_last_assert, &pim_assert_interval)) {
  1311. struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
  1312. struct igmpmsg *im;
  1313. int hlen = ip->ip_hl << 2;
  1314. struct mbuf *mm = m_copym(m, 0, hlen, M_NOWAIT);
  1315. if (mm && (!M_WRITABLE(mm) || mm->m_len < hlen))
  1316. mm = m_pullup(mm, hlen);
  1317. if (mm == NULL)
  1318. return ENOBUFS;
  1319. im = mtod(mm, struct igmpmsg *);
  1320. im->im_msgtype = IGMPMSG_WRONGVIF;
  1321. im->im_mbz = 0;
  1322. im->im_vif = vifi;
  1323. MRTSTAT_INC(mrts_upcalls);
  1324. k_igmpsrc.sin_addr = im->im_src;
  1325. if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) {
  1326. CTR1(KTR_IPMF, "%s: socket queue full", __func__);
  1327. MRTSTAT_INC(mrts_upq_sockfull);
  1328. return ENOBUFS;
  1329. }
  1330. }
  1331. }
  1332. return 0;
  1333. }
  1334. /* If I sourced this packet, it counts as output, else it was input. */
  1335. if (in_hosteq(ip->ip_src, V_viftable[vifi].v_lcl_addr)) {
  1336. V_viftable[vifi].v_pkt_out++;
  1337. V_viftable[vifi].v_bytes_out += plen;
  1338. } else {
  1339. V_viftable[vifi].v_pkt_in++;
  1340. V_viftable[vifi].v_bytes_in += plen;
  1341. }
  1342. rt->mfc_pkt_cnt++;
  1343. rt->mfc_byte_cnt += plen;
  1344. /*
  1345. * For each vif, decide if a copy of the packet should be forwarded.
  1346. * Forward if:
  1347. * - the ttl exceeds the vif's threshold
1348. * - there are group members downstream on the interface
  1349. */
  1350. for (vifi = 0; vifi < V_numvifs; vifi++)
  1351. if ((rt->mfc_ttls[vifi] > 0) && (ip->ip_ttl > rt->mfc_ttls[vifi])) {
  1352. V_viftable[vifi].v_pkt_out++;
  1353. V_viftable[vifi].v_bytes_out += plen;
  1354. if (V_viftable[vifi].v_flags & VIFF_REGISTER)
  1355. pim_register_send(ip, V_viftable + vifi, m, rt);
  1356. else
  1357. phyint_send(ip, V_viftable + vifi, m);
  1358. }
  1359. /*
  1360. * Perform upcall-related bw measuring.
  1361. */
  1362. if (rt->mfc_bw_meter != NULL) {
  1363. struct bw_meter *x;
  1364. struct timeval now;
  1365. microtime(&now);
  1366. MFC_LOCK_ASSERT();
  1367. for (x = rt->mfc_bw_meter; x != NULL; x = x->bm_mfc_next)
  1368. bw_meter_receive_packet(x, plen, &now);
  1369. }
  1370. return 0;
  1371. }
  1372. /*
  1373. * Check if a vif number is legal/ok. This is used by in_mcast.c.
  1374. */
  1375. static int
  1376. X_legal_vif_num(int vif)
  1377. {
  1378. int ret;
  1379. ret = 0;
  1380. if (vif < 0)
  1381. return (ret);
  1382. VIF_LOCK();
  1383. if (vif < V_numvifs)
  1384. ret = 1;
  1385. VIF_UNLOCK();
  1386. return (ret);
  1387. }
  1388. /*
  1389. * Return the local address used by this vif
  1390. */
  1391. static u_long
  1392. X_ip_mcast_src(int vifi)
  1393. {
  1394. in_addr_t addr;
  1395. addr = INADDR_ANY;
  1396. if (vifi < 0)
  1397. return (addr);
  1398. VIF_LOCK();
  1399. if (vifi < V_numvifs)
  1400. addr = V_viftable[vifi].v_lcl_addr.s_addr;
  1401. VIF_UNLOCK();
  1402. return (addr);
  1403. }
  1404. static void
  1405. phyint_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
  1406. {
  1407. struct mbuf *mb_copy;
  1408. int hlen = ip->ip_hl << 2;
  1409. VIF_LOCK_ASSERT();
  1410. /*
  1411. * Make a new reference to the packet; make sure that
  1412. * the IP header is actually copied, not just referenced,
  1413. * so that ip_output() only scribbles on the copy.
  1414. */
  1415. mb_copy = m_copypacket(m, M_NOWAIT);
  1416. if (mb_copy && (!M_WRITABLE(mb_copy) || mb_copy->m_len < hlen))
  1417. mb_copy = m_pullup(mb_copy, hlen);
  1418. if (mb_copy == NULL)
  1419. return;
  1420. send_packet(vifp, mb_copy);
  1421. }
  1422. static void
  1423. send_packet(struct vif *vifp, struct mbuf *m)
  1424. {
  1425. struct ip_moptions imo;
  1426. int error __unused;
  1427. VIF_LOCK_ASSERT();
  1428. imo.imo_multicast_ifp = vifp->v_ifp;
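/* The forwarded copy goes out with its TTL decremented by one; for multicast packets ip_output() takes the TTL from imo_multicast_ttl. */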
  1429. imo.imo_multicast_ttl = mtod(m, struct ip *)->ip_ttl - 1;
  1430. imo.imo_multicast_loop = 1;
  1431. imo.imo_multicast_vif = -1;
  1432. STAILQ_INIT(&imo.imo_head);
  1433. /*
1434. * Re-entrancy should not be a problem here: the packets that
1435. * we send out and that are looped back to us are rejected
1436. * because they appear to come from the loopback interface,
1437. * which prevents looping.
  1438. */
  1439. error = ip_output(m, NULL, NULL, IP_FORWARDING, &imo, NULL);
  1440. CTR3(KTR_IPMF, "%s: vif %td err %d", __func__,
  1441. (ptrdiff_t)(vifp - V_viftable), error);
  1442. }
  1443. /*
  1444. * Stubs for old RSVP socket shim implementation.
  1445. */
  1446. static int
  1447. X_ip_rsvp_vif(struct socket *so __unused, struct sockopt *sopt __unused)
  1448. {
  1449. return (EOPNOTSUPP);
  1450. }
  1451. static void
  1452. X_ip_rsvp_force_done(struct socket *so __unused)
  1453. {
  1454. }
  1455. static int
  1456. X_rsvp_input(struct mbuf **mp, int *offp, int proto)
  1457. {
  1458. struct mbuf *m;
  1459. m = *mp;
  1460. *mp = NULL;
  1461. if (!V_rsvp_on)
  1462. m_freem(m);
  1463. return (IPPROTO_DONE);
  1464. }
  1465. /*
  1466. * Code for bandwidth monitors
  1467. */
  1468. /*
  1469. * Define common interface for timeval-related methods
  1470. */
  1471. #define BW_TIMEVALCMP(tvp, uvp, cmp) timevalcmp((tvp), (uvp), cmp)
  1472. #define BW_TIMEVALDECR(vvp, uvp) timevalsub((vvp), (uvp))
  1473. #define BW_TIMEVALADD(vvp, uvp) timevaladd((vvp), (uvp))
  1474. static uint32_t
  1475. compute_bw_meter_flags(struct bw_upcall *req)
  1476. {
  1477. uint32_t flags = 0;
  1478. if (req->bu_flags & BW_UPCALL_UNIT_PACKETS)
  1479. flags |= BW_METER_UNIT_PACKETS;
  1480. if (req->bu_flags & BW_UPCALL_UNIT_BYTES)
  1481. flags |= BW_METER_UNIT_BYTES;
  1482. if (req->bu_flags & BW_UPCALL_GEQ)
  1483. flags |= BW_METER_GEQ;
  1484. if (req->bu_flags & BW_UPCALL_LEQ)
  1485. flags |= BW_METER_LEQ;
  1486. return flags;
  1487. }
  1488. /*
  1489. * Add a bw_meter entry
  1490. */
  1491. static int
  1492. add_bw_upcall(struct bw_upcall *req)
  1493. {
  1494. struct mfc *mfc;
  1495. struct timeval delta = { BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC,
  1496. BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC };
  1497. struct timeval now;
  1498. struct bw_meter *x;
  1499. uint32_t flags;
  1500. if (!(V_mrt_api_config & MRT_MFC_BW_UPCALL))
  1501. return EOPNOTSUPP;
  1502. /* Test if the flags are valid */
  1503. if (!(req->bu_flags & (BW_UPCALL_UNIT_PACKETS | BW_UPCALL_UNIT_BYTES)))
  1504. return EINVAL;
  1505. if (!(req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ)))
  1506. return EINVAL;
  1507. if ((req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
  1508. == (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
  1509. return EINVAL;
  1510. /* Test if the threshold time interval is valid */
  1511. if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &delta, <))
  1512. return EINVAL;
  1513. flags = compute_bw_meter_flags(req);
  1514. /*
1515. * Check whether the same bw_meter entry is already installed
  1516. */
  1517. MFC_LOCK();
  1518. mfc = mfc_find(&req->bu_src, &req->bu_dst);
  1519. if (mfc == NULL) {
  1520. MFC_UNLOCK();
  1521. return EADDRNOTAVAIL;
  1522. }
  1523. for (x = mfc->mfc_bw_meter; x != NULL; x = x->bm_mfc_next) {
  1524. if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
  1525. &req->bu_threshold.b_time, ==)) &&
  1526. (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
  1527. (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
  1528. (x->bm_flags & BW_METER_USER_FLAGS) == flags) {
  1529. MFC_UNLOCK();
  1530. return 0; /* XXX Already installed */
  1531. }
  1532. }
  1533. /* Allocate the new bw_meter entry */
  1534. x = (struct bw_meter *)malloc(sizeof(*x), M_BWMETER, M_NOWAIT);
  1535. if (x == NULL) {
  1536. MFC_UNLOCK();
  1537. return ENOBUFS;
  1538. }
  1539. /* Set the new bw_meter entry */
  1540. x->bm_threshold.b_time = req->bu_threshold.b_time;
  1541. microtime(&now);
  1542. x->bm_start_time = now;
  1543. x->bm_threshold.b_packets = req->bu_threshold.b_packets;
  1544. x->bm_threshold.b_bytes = req->bu_threshold.b_bytes;
  1545. x->bm_measured.b_packets = 0;
  1546. x->bm_measured.b_bytes = 0;
  1547. x->bm_flags = flags;
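/* A bm_time_hash of BW_METER_BUCKETS marks the entry as not yet scheduled in any timer bucket (see unschedule_bw_meter()). */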
  1548. x->bm_time_next = NULL;
  1549. x->bm_time_hash = BW_METER_BUCKETS;
  1550. /* Add the new bw_meter entry to the front of entries for this MFC */
  1551. x->bm_mfc = mfc;
  1552. x->bm_mfc_next = mfc->mfc_bw_meter;
  1553. mfc->mfc_bw_meter = x;
  1554. schedule_bw_meter(x, &now);
  1555. MFC_UNLOCK();
  1556. return 0;
  1557. }
  1558. static void
  1559. free_bw_list(struct bw_meter *list)
  1560. {
  1561. while (list != NULL) {
  1562. struct bw_meter *x = list;
  1563. list = list->bm_mfc_next;
  1564. unschedule_bw_meter(x);
  1565. free(x, M_BWMETER);
  1566. }
  1567. }
  1568. /*
  1569. * Delete one or multiple bw_meter entries
  1570. */
  1571. static int
  1572. del_bw_upcall(struct bw_upcall *req)
  1573. {
  1574. struct mfc *mfc;
  1575. struct bw_meter *x;
  1576. if (!(V_mrt_api_config & MRT_MFC_BW_UPCALL))
  1577. return EOPNOTSUPP;
  1578. MFC_LOCK();
  1579. /* Find the corresponding MFC entry */
  1580. mfc = mfc_find(&req->bu_src, &req->bu_dst);
  1581. if (mfc == NULL) {
  1582. MFC_UNLOCK();
  1583. return EADDRNOTAVAIL;
  1584. } else if (req->bu_flags & BW_UPCALL_DELETE_ALL) {
  1585. /*
  1586. * Delete all bw_meter entries for this mfc
  1587. */
  1588. struct bw_meter *list;
  1589. list = mfc->mfc_bw_meter;
  1590. mfc->mfc_bw_meter = NULL;
  1591. free_bw_list(list);
  1592. MFC_UNLOCK();
  1593. return 0;
  1594. } else { /* Delete a single bw_meter entry */
  1595. struct bw_meter *prev;
  1596. uint32_t flags = 0;
  1597. flags = compute_bw_meter_flags(req);
  1598. /* Find the bw_meter entry to delete */
  1599. for (prev = NULL, x = mfc->mfc_bw_meter; x != NULL;
  1600. prev = x, x = x->bm_mfc_next) {
  1601. if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
  1602. &req->bu_threshold.b_time, ==)) &&
  1603. (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
  1604. (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
  1605. (x->bm_flags & BW_METER_USER_FLAGS) == flags)
  1606. break;
  1607. }
  1608. if (x != NULL) { /* Delete entry from the list for this MFC */
  1609. if (prev != NULL)
  1610. prev->bm_mfc_next = x->bm_mfc_next; /* remove from middle*/
  1611. else
  1612. x->bm_mfc->mfc_bw_meter = x->bm_mfc_next;/* new head of list */
  1613. unschedule_bw_meter(x);
  1614. MFC_UNLOCK();
  1615. /* Free the bw_meter entry */
  1616. free(x, M_BWMETER);
  1617. return 0;
  1618. } else {
  1619. MFC_UNLOCK();
  1620. return EINVAL;
  1621. }
  1622. }
  1623. /* NOTREACHED */
  1624. }
  1625. /*
  1626. * Perform bandwidth measurement processing that may result in an upcall
  1627. */
  1628. static void
  1629. bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
  1630. {
  1631. struct timeval delta;
  1632. MFC_LOCK_ASSERT();
  1633. delta = *nowp;
  1634. BW_TIMEVALDECR(&delta, &x->bm_start_time);
  1635. if (x->bm_flags & BW_METER_GEQ) {
  1636. /*
  1637. * Processing for ">=" type of bw_meter entry
  1638. */
  1639. if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
  1640. /* Reset the bw_meter entry */
  1641. x->bm_start_time = *nowp;
  1642. x->bm_measured.b_packets = 0;
  1643. x->bm_measured.b_bytes = 0;
  1644. x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
  1645. }
  1646. /* Record that a packet is received */
  1647. x->bm_measured.b_packets++;
  1648. x->bm_measured.b_bytes += plen;
  1649. /*
  1650. * Test if we should deliver an upcall
  1651. */
  1652. if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) {
  1653. if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
  1654. (x->bm_measured.b_packets >= x->bm_threshold.b_packets)) ||
  1655. ((x->bm_flags & BW_METER_UNIT_BYTES) &&
  1656. (x->bm_measured.b_bytes >= x->bm_threshold.b_bytes))) {
  1657. /* Prepare an upcall for delivery */
  1658. bw_meter_prepare_upcall(x, nowp);
  1659. x->bm_flags |= BW_METER_UPCALL_DELIVERED;
  1660. }
  1661. }
  1662. } else if (x->bm_flags & BW_METER_LEQ) {
  1663. /*
  1664. * Processing for "<=" type of bw_meter entry
  1665. */
  1666. if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
  1667. /*
  1668. * We are behind time with the multicast forwarding table
  1669. * scanning for "<=" type of bw_meter entries, so test now
  1670. * if we should deliver an upcall.
  1671. */
  1672. if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
  1673. (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
  1674. ((x->bm_flags & BW_METER_UNIT_BYTES) &&
  1675. (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
  1676. /* Prepare an upcall for delivery */
  1677. bw_meter_prepare_upcall(x, nowp);
  1678. }
  1679. /* Reschedule the bw_meter entry */
  1680. unschedule_bw_meter(x);
  1681. schedule_bw_meter(x, nowp);
  1682. }
  1683. /* Record that a packet is received */
  1684. x->bm_measured.b_packets++;
  1685. x->bm_measured.b_bytes += plen;
  1686. /*
  1687. * Test if we should restart the measuring interval
  1688. */
  1689. if ((x->bm_flags & BW_METER_UNIT_PACKETS &&
  1690. x->bm_measured.b_packets <= x->bm_threshold.b_packets) ||
  1691. (x->bm_flags & BW_METER_UNIT_BYTES &&
  1692. x->bm_measured.b_bytes <= x->bm_threshold.b_bytes)) {
  1693. /* Don't restart the measuring interval */
  1694. } else {
  1695. /* Do restart the measuring interval */
  1696. /*
  1697. * XXX: note that we don't unschedule and schedule, because this
  1698. * might be too much overhead per packet. Instead, when we process
  1699. * all entries for a given timer hash bin, we check whether it is
  1700. * really a timeout. If not, we reschedule at that time.
  1701. */
  1702. x->bm_start_time = *nowp;
  1703. x->bm_measured.b_packets = 0;
  1704. x->bm_measured.b_bytes = 0;
  1705. x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
  1706. }
  1707. }
  1708. }
  1709. /*
  1710. * Prepare a bandwidth-related upcall
  1711. */
  1712. static void
  1713. bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp)
  1714. {
  1715. struct timeval delta;
  1716. struct bw_upcall *u;
  1717. MFC_LOCK_ASSERT();
  1718. /*
  1719. * Compute the measured time interval
  1720. */
  1721. delta = *nowp;
  1722. BW_TIMEVALDECR(&delta, &x->bm_start_time);
  1723. /*
  1724. * If there are too many pending upcalls, deliver them now
  1725. */
  1726. if (V_bw_upcalls_n >= BW_UPCALLS_MAX)
  1727. bw_upcalls_send();
  1728. /*
  1729. * Set the bw_upcall entry
  1730. */
  1731. u = &V_bw_upcalls[V_bw_upcalls_n++];
  1732. u->bu_src = x->bm_mfc->mfc_origin;
  1733. u->bu_dst = x->bm_mfc->mfc_mcastgrp;
  1734. u->bu_threshold.b_time = x->bm_threshold.b_time;
  1735. u->bu_threshold.b_packets = x->bm_threshold.b_packets;
  1736. u->bu_threshold.b_bytes = x->bm_threshold.b_bytes;
  1737. u->bu_measured.b_time = delta;
  1738. u->bu_measured.b_packets = x->bm_measured.b_packets;
  1739. u->bu_measured.b_bytes = x->bm_measured.b_bytes;
  1740. u->bu_flags = 0;
  1741. if (x->bm_flags & BW_METER_UNIT_PACKETS)
  1742. u->bu_flags |= BW_UPCALL_UNIT_PACKETS;
  1743. if (x->bm_flags & BW_METER_UNIT_BYTES)
  1744. u->bu_flags |= BW_UPCALL_UNIT_BYTES;
  1745. if (x->bm_flags & BW_METER_GEQ)
  1746. u->bu_flags |= BW_UPCALL_GEQ;
  1747. if (x->bm_flags & BW_METER_LEQ)
  1748. u->bu_flags |= BW_UPCALL_LEQ;
  1749. }
  1750. /*
  1751. * Send the pending bandwidth-related upcalls
  1752. */
  1753. static void
  1754. bw_upcalls_send(void)
  1755. {
  1756. struct mbuf *m;
  1757. int len = V_bw_upcalls_n * sizeof(V_bw_upcalls[0]);
  1758. struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
  1759. static struct igmpmsg igmpmsg = { 0, /* unused1 */
  1760. 0, /* unused2 */
  1761. IGMPMSG_BW_UPCALL,/* im_msgtype */
  1762. 0, /* im_mbz */
  1763. 0, /* im_vif */
  1764. 0, /* unused3 */
  1765. { 0 }, /* im_src */
  1766. { 0 } }; /* im_dst */
  1767. MFC_LOCK_ASSERT();
  1768. if (V_bw_upcalls_n == 0)
  1769. return; /* No pending upcalls */
  1770. V_bw_upcalls_n = 0;
  1771. /*
  1772. * Allocate a new mbuf, initialize it with the header and
1773. * the payload for the pending upcalls.
  1774. */
  1775. m = m_gethdr(M_NOWAIT, MT_DATA);
  1776. if (m == NULL) {
  1777. log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n");
  1778. return;
  1779. }
  1780. m_copyback(m, 0, sizeof(struct igmpmsg), (caddr_t)&igmpmsg);
  1781. m_copyback(m, sizeof(struct igmpmsg), len, (caddr_t)&V_bw_upcalls[0]);
  1782. /*
  1783. * Send the upcalls
  1784. * XXX do we need to set the address in k_igmpsrc ?
  1785. */
  1786. MRTSTAT_INC(mrts_upcalls);
  1787. if (socket_send(V_ip_mrouter, m, &k_igmpsrc) < 0) {
  1788. log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n");
  1789. MRTSTAT_INC(mrts_upq_sockfull);
  1790. }
  1791. }
  1792. /*
  1793. * Compute the timeout hash value for the bw_meter entries
  1794. */
  1795. #define BW_METER_TIMEHASH(bw_meter, hash) \
  1796. do { \
  1797. struct timeval next_timeval = (bw_meter)->bm_start_time; \
  1798. \
  1799. BW_TIMEVALADD(&next_timeval, &(bw_meter)->bm_threshold.b_time); \
  1800. (hash) = next_timeval.tv_sec; \
  1801. if (next_timeval.tv_usec) \
  1802. (hash)++; /* XXX: make sure we don't timeout early */ \
  1803. (hash) %= BW_METER_BUCKETS; \
  1804. } while (0)
  1805. /*
  1806. * Schedule a timer to process periodically bw_meter entry of type "<="
  1807. * by linking the entry in the proper hash bucket.
  1808. */
  1809. static void
  1810. schedule_bw_meter(struct bw_meter *x, struct timeval *nowp)
  1811. {
  1812. int time_hash;
  1813. MFC_LOCK_ASSERT();
  1814. if (!(x->bm_flags & BW_METER_LEQ))
  1815. return; /* XXX: we schedule timers only for "<=" entries */
  1816. /*
  1817. * Reset the bw_meter entry
  1818. */
  1819. x->bm_start_time = *nowp;
  1820. x->bm_measured.b_packets = 0;
  1821. x->bm_measured.b_bytes = 0;
  1822. x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
  1823. /*
  1824. * Compute the timeout hash value and insert the entry
  1825. */
  1826. BW_METER_TIMEHASH(x, time_hash);
  1827. x->bm_time_next = V_bw_meter_timers[time_hash];
  1828. V_bw_meter_timers[time_hash] = x;
  1829. x->bm_time_hash = time_hash;
  1830. }
  1831. /*
  1832. * Unschedule the periodic timer that processes bw_meter entry of type "<="
  1833. * by removing the entry from the proper hash bucket.
  1834. */
  1835. static void
  1836. unschedule_bw_meter(struct bw_meter *x)
  1837. {
  1838. int time_hash;
  1839. struct bw_meter *prev, *tmp;
  1840. MFC_LOCK_ASSERT();
  1841. if (!(x->bm_flags & BW_METER_LEQ))
  1842. return; /* XXX: we schedule timers only for "<=" entries */
  1843. /*
  1844. * Compute the timeout hash value and delete the entry
  1845. */
  1846. time_hash = x->bm_time_hash;
  1847. if (time_hash >= BW_METER_BUCKETS)
  1848. return; /* Entry was not scheduled */
  1849. for (prev = NULL, tmp = V_bw_meter_timers[time_hash];
  1850. tmp != NULL; prev = tmp, tmp = tmp->bm_time_next)
  1851. if (tmp == x)
  1852. break;
  1853. if (tmp == NULL)
  1854. panic("unschedule_bw_meter: bw_meter entry not found");
  1855. if (prev != NULL)
  1856. prev->bm_time_next = x->bm_time_next;
  1857. else
  1858. V_bw_meter_timers[time_hash] = x->bm_time_next;
  1859. x->bm_time_next = NULL;
  1860. x->bm_time_hash = BW_METER_BUCKETS;
  1861. }
  1862. /*
  1863. * Process all "<=" type of bw_meter that should be processed now,
  1864. * and for each entry prepare an upcall if necessary. Each processed
  1865. * entry is rescheduled again for the (periodic) processing.
  1866. *
  1867. * This is run periodically (once per second normally). On each round,
  1868. * all the potentially matching entries are in the hash slot that we are
  1869. * looking at.
  1870. */
  1871. static void
  1872. bw_meter_process()
  1873. {
  1874. uint32_t loops;
  1875. int i;
  1876. struct timeval now, process_endtime;
  1877. microtime(&now);
  1878. if (V_last_tv_sec == now.tv_sec)
  1879. return; /* nothing to do */
  1880. loops = now.tv_sec - V_last_tv_sec;
  1881. V_last_tv_sec = now.tv_sec;
  1882. if (loops > BW_METER_BUCKETS)
  1883. loops = BW_METER_BUCKETS;
  1884. MFC_LOCK();
  1885. /*
  1886. * Process all bins of bw_meter entries from the one after the last
  1887. * processed to the current one. On entry, i points to the last bucket
  1888. * visited, so we need to increment i at the beginning of the loop.
  1889. */
  1890. for (i = (now.tv_sec - loops) % BW_METER_BUCKETS; loops > 0; loops--) {
  1891. struct bw_meter *x, *tmp_list;
  1892. if (++i >= BW_METER_BUCKETS)
  1893. i = 0;
  1894. /* Disconnect the list of bw_meter entries from the bin */
  1895. tmp_list = V_bw_meter_timers[i];
  1896. V_bw_meter_timers[i] = NULL;
  1897. /* Process the list of bw_meter entries */
  1898. while (tmp_list != NULL) {
  1899. x = tmp_list;
  1900. tmp_list = tmp_list->bm_time_next;
  1901. /* Test if the time interval is over */
  1902. process_endtime = x->bm_start_time;
  1903. BW_TIMEVALADD(&process_endtime, &x->bm_threshold.b_time);
  1904. if (BW_TIMEVALCMP(&process_endtime, &now, >)) {
  1905. /* Not yet: reschedule, but don't reset */
  1906. int time_hash;
  1907. BW_METER_TIMEHASH(x, time_hash);
  1908. if (time_hash == i && process_endtime.tv_sec == now.tv_sec) {
  1909. /*
  1910. * XXX: somehow the bin processing is a bit ahead of time.
  1911. * Put the entry in the next bin.
  1912. */
  1913. if (++time_hash >= BW_METER_BUCKETS)
  1914. time_hash = 0;
  1915. }
  1916. x->bm_time_next = V_bw_meter_timers[time_hash];
  1917. V_bw_meter_timers[time_hash] = x;
  1918. x->bm_time_hash = time_hash;
  1919. continue;
  1920. }
  1921. /*
  1922. * Test if we should deliver an upcall
  1923. */
  1924. if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
  1925. (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
  1926. ((x->bm_flags & BW_METER_UNIT_BYTES) &&
  1927. (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
  1928. /* Prepare an upcall for delivery */
  1929. bw_meter_prepare_upcall(x, &now);
  1930. }
  1931. /*
  1932. * Reschedule for next processing
  1933. */
  1934. schedule_bw_meter(x, &now);
  1935. }
  1936. }
  1937. /* Send all upcalls that are pending delivery */
  1938. bw_upcalls_send();
  1939. MFC_UNLOCK();
  1940. }
  1941. /*
  1942. * A periodic function for sending all upcalls that are pending delivery
  1943. */
  1944. static void
  1945. expire_bw_upcalls_send(void *arg)
  1946. {
  1947. CURVNET_SET((struct vnet *) arg);
  1948. MFC_LOCK();
  1949. bw_upcalls_send();
  1950. MFC_UNLOCK();
  1951. callout_reset(&V_bw_upcalls_ch, BW_UPCALLS_PERIOD, expire_bw_upcalls_send,
  1952. curvnet);
  1953. CURVNET_RESTORE();
  1954. }
  1955. /*
  1956. * A periodic function for periodic scanning of the multicast forwarding
  1957. * table for processing all "<=" bw_meter entries.
  1958. */
  1959. static void
  1960. expire_bw_meter_process(void *arg)
  1961. {
  1962. CURVNET_SET((struct vnet *) arg);
  1963. if (V_mrt_api_config & MRT_MFC_BW_UPCALL)
  1964. bw_meter_process();
  1965. callout_reset(&V_bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process,
  1966. curvnet);
  1967. CURVNET_RESTORE();
  1968. }
  1969. /*
  1970. * End of bandwidth monitoring code
  1971. */
  1972. /*
1973. * Send the packet up to the user-level daemon, or, when a rendezvous
1974. * point is configured, do the PIM Register encapsulation in the kernel.
  1975. */
  1976. static int
  1977. pim_register_send(struct ip *ip, struct vif *vifp, struct mbuf *m,
  1978. struct mfc *rt)
  1979. {
  1980. struct mbuf *mb_copy, *mm;
  1981. /*
  1982. * Do not send IGMP_WHOLEPKT notifications to userland, if the
  1983. * rendezvous point was unspecified, and we were told not to.
  1984. */
  1985. if (pim_squelch_wholepkt != 0 && (V_mrt_api_config & MRT_MFC_RP) &&
  1986. in_nullhost(rt->mfc_rp))
  1987. return 0;
  1988. mb_copy = pim_register_prepare(ip, m);
  1989. if (mb_copy == NULL)
  1990. return ENOBUFS;
  1991. /*
  1992. * Send all the fragments. Note that the mbuf for each fragment
  1993. * is freed by the sending machinery.
  1994. */
  1995. for (mm = mb_copy; mm; mm = mb_copy) {
  1996. mb_copy = mm->m_nextpkt;
  1997. mm->m_nextpkt = 0;
  1998. mm = m_pullup(mm, sizeof(struct ip));
  1999. if (mm != NULL) {
  2000. ip = mtod(mm, struct ip *);
  2001. if ((V_mrt_api_config & MRT_MFC_RP) && !in_nullhost(rt->mfc_rp)) {
  2002. pim_register_send_rp(ip, vifp, mm, rt);
  2003. } else {
  2004. pim_register_send_upcall(ip, vifp, mm, rt);
  2005. }
  2006. }
  2007. }
  2008. return 0;
  2009. }
  2010. /*
  2011. * Return a copy of the data packet that is ready for PIM Register
  2012. * encapsulation.
2013. * XXX: Note that the IP header in the returned copy is valid.
  2014. */
  2015. static struct mbuf *
  2016. pim_register_prepare(struct ip *ip, struct mbuf *m)
  2017. {
  2018. struct mbuf *mb_copy = NULL;
  2019. int mtu;
  2020. /* Take care of delayed checksums */
  2021. if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
  2022. in_delayed_cksum(m);
  2023. m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
  2024. }
  2025. /*
  2026. * Copy the old packet & pullup its IP header into the
  2027. * new mbuf so we can modify it.
  2028. */
  2029. mb_copy = m_copypacket(m, M_NOWAIT);
  2030. if (mb_copy == NULL)
  2031. return NULL;
  2032. mb_copy = m_pullup(mb_copy, ip->ip_hl << 2);
  2033. if (mb_copy == NULL)
  2034. return NULL;
  2035. /* take care of the TTL */
  2036. ip = mtod(mb_copy, struct ip *);
  2037. --ip->ip_ttl;
  2038. /* Compute the MTU after the PIM Register encapsulation */
  2039. mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr);
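/* The limit comes from the 16-bit ip_len field of the outer Register packet, not from the link MTU; larger packets are fragmented below. */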
  2040. if (ntohs(ip->ip_len) <= mtu) {
  2041. /* Turn the IP header into a valid one */
  2042. ip->ip_sum = 0;
  2043. ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
  2044. } else {
  2045. /* Fragment the packet */
  2046. mb_copy->m_pkthdr.csum_flags |= CSUM_IP;
  2047. if (ip_fragment(ip, &mb_copy, mtu, 0) != 0) {
  2048. m_freem(mb_copy);
  2049. return NULL;
  2050. }
  2051. }
  2052. return mb_copy;
  2053. }
  2054. /*
  2055. * Send an upcall with the data packet to the user-level process.
  2056. */
  2057. static int
  2058. pim_register_send_upcall(struct ip *ip, struct vif *vifp,
  2059. struct mbuf *mb_copy, struct mfc *rt)
  2060. {
  2061. struct mbuf *mb_first;
  2062. int len = ntohs(ip->ip_len);
  2063. struct igmpmsg *im;
  2064. struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
  2065. VIF_LOCK_ASSERT();
  2066. /*
  2067. * Add a new mbuf with an upcall header
  2068. */
  2069. mb_first = m_gethdr(M_NOWAIT, MT_DATA);
  2070. if (mb_first == NULL) {
  2071. m_freem(mb_copy);
  2072. return ENOBUFS;
  2073. }
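/* Leave room for a link-layer header to be prepended later. */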
  2074. mb_first->m_data += max_linkhdr;
  2075. mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg);
  2076. mb_first->m_len = sizeof(struct igmpmsg);
  2077. mb_first->m_next = mb_copy;
  2078. /* Send message to routing daemon */
  2079. im = mtod(mb_first, struct igmpmsg *);
  2080. im->im_msgtype = IGMPMSG_WHOLEPKT;
  2081. im->im_mbz = 0;
  2082. im->im_vif = vifp - V_viftable;
  2083. im->im_src = ip->ip_src;
  2084. im->im_dst = ip->ip_dst;
  2085. k_igmpsrc.sin_addr = ip->ip_src;
  2086. MRTSTAT_INC(mrts_upcalls);
  2087. if (socket_send(V_ip_mrouter, mb_first, &k_igmpsrc) < 0) {
  2088. CTR1(KTR_IPMF, "%s: socket queue full", __func__);
  2089. MRTSTAT_INC(mrts_upq_sockfull);
  2090. return ENOBUFS;
  2091. }
  2092. /* Keep statistics */
  2093. PIMSTAT_INC(pims_snd_registers_msgs);
  2094. PIMSTAT_ADD(pims_snd_registers_bytes, len);
  2095. return 0;
  2096. }
  2097. /*
  2098. * Encapsulate the data packet in PIM Register message and send it to the RP.
  2099. */
  2100. static int
  2101. pim_register_send_rp(struct ip *ip, struct vif *vifp, struct mbuf *mb_copy,
  2102. struct mfc *rt)
  2103. {
  2104. struct mbuf *mb_first;
  2105. struct ip *ip_outer;
  2106. struct pim_encap_pimhdr *pimhdr;
  2107. int len = ntohs(ip->ip_len);
  2108. vifi_t vifi = rt->mfc_parent;
  2109. VIF_LOCK_ASSERT();
  2110. if ((vifi >= V_numvifs) || in_nullhost(V_viftable[vifi].v_lcl_addr)) {
  2111. m_freem(mb_copy);
  2112. return EADDRNOTAVAIL; /* The iif vif is invalid */
  2113. }
  2114. /*
  2115. * Add a new mbuf with the encapsulating header
  2116. */
  2117. mb_first = m_gethdr(M_NOWAIT, MT_DATA);
  2118. if (mb_first == NULL) {
  2119. m_freem(mb_copy);
  2120. return ENOBUFS;
  2121. }
  2122. mb_first->m_data += max_linkhdr;
  2123. mb_first->m_len = sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
  2124. mb_first->m_next = mb_copy;
  2125. mb_first->m_pkthdr.len = len + mb_first->m_len;
  2126. /*
  2127. * Fill in the encapsulating IP and PIM header
  2128. */
  2129. ip_outer = mtod(mb_first, struct ip *);
  2130. *ip_outer = pim_encap_iphdr;
  2131. ip_outer->ip_len = htons(len + sizeof(pim_encap_iphdr) +
  2132. sizeof(pim_encap_pimhdr));
  2133. ip_outer->ip_src = V_viftable[vifi].v_lcl_addr;
  2134. ip_outer->ip_dst = rt->mfc_rp;
  2135. /*
  2136. * Copy the inner header TOS to the outer header, and take care of the
  2137. * IP_DF bit.
  2138. */
  2139. ip_outer->ip_tos = ip->ip_tos;
  2140. if (ip->ip_off & htons(IP_DF))
  2141. ip_outer->ip_off |= htons(IP_DF);
  2142. ip_fillid(ip_outer);
  2143. pimhdr = (struct pim_encap_pimhdr *)((caddr_t)ip_outer
  2144. + sizeof(pim_encap_iphdr));
  2145. *pimhdr = pim_encap_pimhdr;
  2146. /* If the iif crosses a border, set the Border-bit */
  2147. if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & V_mrt_api_config)
  2148. pimhdr->flags |= htonl(PIM_BORDER_REGISTER);
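/* Compute the PIM checksum over the PIM header only: temporarily advance m_data past the encapsulating IP header, then restore it. */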
  2149. mb_first->m_data += sizeof(pim_encap_iphdr);
  2150. pimhdr->pim.pim_cksum = in_cksum(mb_first, sizeof(pim_encap_pimhdr));
  2151. mb_first->m_data -= sizeof(pim_encap_iphdr);
  2152. send_packet(vifp, mb_first);
  2153. /* Keep statistics */
  2154. PIMSTAT_INC(pims_snd_registers_msgs);
  2155. PIMSTAT_ADD(pims_snd_registers_bytes, len);
  2156. return 0;
  2157. }
  2158. /*
  2159. * pim_encapcheck() is called by the encap4_input() path at runtime to
  2160. * determine if a packet is for PIM; allowing PIM to be dynamically loaded
  2161. * into the kernel.
  2162. */
  2163. static int
  2164. pim_encapcheck(const struct mbuf *m __unused, int off __unused,
  2165. int proto __unused, void *arg __unused)
  2166. {
  2167. KASSERT(proto == IPPROTO_PIM, ("not for IPPROTO_PIM"));
  2168. return (8); /* claim the datagram. */
  2169. }
  2170. /*
  2171. * PIM-SMv2 and PIM-DM messages processing.
  2172. * Receives and verifies the PIM control messages, and passes them
  2173. * up to the listening socket, using rip_input().
  2174. * The only message with special processing is the PIM_REGISTER message
  2175. * (used by PIM-SM): the PIM header is stripped off, and the inner packet
  2176. * is passed to if_simloop().
  2177. */
  2178. static int
  2179. pim_input(struct mbuf *m, int off, int proto, void *arg __unused)
  2180. {
  2181. struct ip *ip = mtod(m, struct ip *);
  2182. struct pim *pim;
  2183. int iphlen = off;
  2184. int minlen;
  2185. int datalen = ntohs(ip->ip_len) - iphlen;
  2186. int ip_tos;
  2187. /* Keep statistics */
  2188. PIMSTAT_INC(pims_rcv_total_msgs);
  2189. PIMSTAT_ADD(pims_rcv_total_bytes, datalen);
  2190. /*
  2191. * Validate lengths
  2192. */
  2193. if (datalen < PIM_MINLEN) {
  2194. PIMSTAT_INC(pims_rcv_tooshort);
  2195. CTR3(KTR_IPMF, "%s: short packet (%d) from 0x%08x",
  2196. __func__, datalen, ntohl(ip->ip_src.s_addr));
  2197. m_freem(m);
  2198. return (IPPROTO_DONE);
  2199. }
  2200. /*
2201. * If the packet is at least as big as a REGISTER, go ahead
  2202. * and grab the PIM REGISTER header size, to avoid another
  2203. * possible m_pullup() later.
  2204. *
  2205. * PIM_MINLEN == pimhdr + u_int32_t == 4 + 4 = 8
  2206. * PIM_REG_MINLEN == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28
  2207. */
  2208. minlen = iphlen + (datalen >= PIM_REG_MINLEN ? PIM_REG_MINLEN : PIM_MINLEN);
  2209. /*
  2210. * Get the IP and PIM headers in contiguous memory, and
  2211. * possibly the PIM REGISTER header.
  2212. */
  2213. if (m->m_len < minlen && (m = m_pullup(m, minlen)) == NULL) {
  2214. CTR1(KTR_IPMF, "%s: m_pullup() failed", __func__);
  2215. return (IPPROTO_DONE);
  2216. }
  2217. /* m_pullup() may have given us a new mbuf so reset ip. */
  2218. ip = mtod(m, struct ip *);
  2219. ip_tos = ip->ip_tos;
  2220. /* adjust mbuf to point to the PIM header */
  2221. m->m_data += iphlen;
  2222. m->m_len -= iphlen;
  2223. pim = mtod(m, struct pim *);
  2224. /*
  2225. * Validate checksum. If PIM REGISTER, exclude the data packet.
  2226. *
  2227. * XXX: some older PIMv2 implementations don't make this distinction,
  2228. * so for compatibility reason perform the checksum over part of the
  2229. * message, and if error, then over the whole message.
  2230. */
  2231. if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER && in_cksum(m, PIM_MINLEN) == 0) {
  2232. /* do nothing, checksum okay */
  2233. } else if (in_cksum(m, datalen)) {
  2234. PIMSTAT_INC(pims_rcv_badsum);
  2235. CTR1(KTR_IPMF, "%s: invalid checksum", __func__);
  2236. m_freem(m);
  2237. return (IPPROTO_DONE);
  2238. }
  2239. /* PIM version check */
  2240. if (PIM_VT_V(pim->pim_vt) < PIM_VERSION) {
  2241. PIMSTAT_INC(pims_rcv_badversion);
  2242. CTR3(KTR_IPMF, "%s: bad version %d expect %d", __func__,
  2243. (int)PIM_VT_V(pim->pim_vt), PIM_VERSION);
  2244. m_freem(m);
  2245. return (IPPROTO_DONE);
  2246. }
  2247. /* restore mbuf back to the outer IP */
  2248. m->m_data -= iphlen;
  2249. m->m_len += iphlen;
  2250. if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) {
  2251. /*
  2252. * Since this is a REGISTER, we'll make a copy of the register
  2253. * headers ip + pim + u_int32 + encap_ip, to be passed up to the
  2254. * routing daemon.
  2255. */
  2256. struct sockaddr_in dst = { sizeof(dst), AF_INET };
  2257. struct mbuf *mcp;
  2258. struct ip *encap_ip;
  2259. u_int32_t *reghdr;
  2260. struct ifnet *vifp;
  2261. VIF_LOCK();
  2262. if ((V_reg_vif_num >= V_numvifs) || (V_reg_vif_num == VIFI_INVALID)) {
  2263. VIF_UNLOCK();
  2264. CTR2(KTR_IPMF, "%s: register vif not set: %d", __func__,
  2265. (int)V_reg_vif_num);
  2266. m_freem(m);
  2267. return (IPPROTO_DONE);
  2268. }
  2269. /* XXX need refcnt? */
  2270. vifp = V_viftable[V_reg_vif_num].v_ifp;
  2271. VIF_UNLOCK();
  2272. /*
  2273. * Validate length
  2274. */
  2275. if (datalen < PIM_REG_MINLEN) {
  2276. PIMSTAT_INC(pims_rcv_tooshort);
  2277. PIMSTAT_INC(pims_rcv_badregisters);
  2278. CTR1(KTR_IPMF, "%s: register packet size too small", __func__);
  2279. m_freem(m);
  2280. return (IPPROTO_DONE);
  2281. }
  2282. reghdr = (u_int32_t *)(pim + 1);
  2283. encap_ip = (struct ip *)(reghdr + 1);
  2284. CTR3(KTR_IPMF, "%s: register: encap ip src 0x%08x len %d",
  2285. __func__, ntohl(encap_ip->ip_src.s_addr),
  2286. ntohs(encap_ip->ip_len));
  2287. /* verify the version number of the inner packet */
  2288. if (encap_ip->ip_v != IPVERSION) {
  2289. PIMSTAT_INC(pims_rcv_badregisters);
  2290. CTR1(KTR_IPMF, "%s: bad encap ip version", __func__);
  2291. m_freem(m);
  2292. return (IPPROTO_DONE);
  2293. }
  2294. /* verify the inner packet is destined to a mcast group */
  2295. if (!IN_MULTICAST(ntohl(encap_ip->ip_dst.s_addr))) {
  2296. PIMSTAT_INC(pims_rcv_badregisters);
  2297. CTR2(KTR_IPMF, "%s: bad encap ip dest 0x%08x", __func__,
  2298. ntohl(encap_ip->ip_dst.s_addr));
  2299. m_freem(m);
  2300. return (IPPROTO_DONE);
  2301. }
  2302. /* If a NULL_REGISTER, pass it to the daemon */
  2303. if ((ntohl(*reghdr) & PIM_NULL_REGISTER))
  2304. goto pim_input_to_daemon;
  2305. /*
  2306. * Copy the TOS from the outer IP header to the inner IP header.
  2307. */
  2308. if (encap_ip->ip_tos != ip_tos) {
  2309. /* Outer TOS -> inner TOS */
  2310. encap_ip->ip_tos = ip_tos;
  2311. /* Recompute the inner header checksum. Sigh... */
  2312. /* adjust mbuf to point to the inner IP header */
  2313. m->m_data += (iphlen + PIM_MINLEN);
  2314. m->m_len -= (iphlen + PIM_MINLEN);
  2315. encap_ip->ip_sum = 0;
  2316. encap_ip->ip_sum = in_cksum(m, encap_ip->ip_hl << 2);
  2317. /* restore mbuf to point back to the outer IP header */
  2318. m->m_data -= (iphlen + PIM_MINLEN);
  2319. m->m_len += (iphlen + PIM_MINLEN);
  2320. }
  2321. /*
  2322. * Decapsulate the inner IP packet and loopback to forward it
  2323. * as a normal multicast packet. Also, make a copy of the
  2324. * outer_iphdr + pimhdr + reghdr + encap_iphdr
  2325. * to pass to the daemon later, so it can take the appropriate
  2326. * actions (e.g., send back PIM_REGISTER_STOP).
  2327. * XXX: here m->m_data points to the outer IP header.
  2328. */
  2329. mcp = m_copym(m, 0, iphlen + PIM_REG_MINLEN, M_NOWAIT);
  2330. if (mcp == NULL) {
  2331. CTR1(KTR_IPMF, "%s: m_copym() failed", __func__);
  2332. m_freem(m);
  2333. return (IPPROTO_DONE);
  2334. }
  2335. /* Keep statistics */
  2336. /* XXX: registers_bytes include only the encap. mcast pkt */
  2337. PIMSTAT_INC(pims_rcv_registers_msgs);
  2338. PIMSTAT_ADD(pims_rcv_registers_bytes, ntohs(encap_ip->ip_len));
  2339. /*
  2340. * forward the inner ip packet; point m_data at the inner ip.
  2341. */
  2342. m_adj(m, iphlen + PIM_MINLEN);
  2343. CTR4(KTR_IPMF,
  2344. "%s: forward decap'd REGISTER: src %lx dst %lx vif %d",
  2345. __func__,
  2346. (u_long)ntohl(encap_ip->ip_src.s_addr),
  2347. (u_long)ntohl(encap_ip->ip_dst.s_addr),
  2348. (int)V_reg_vif_num);
  2349. /* NB: vifp was collected above; can it change on us? */
  2350. if_simloop(vifp, m, dst.sin_family, 0);
  2351. /* prepare the register head to send to the mrouting daemon */
  2352. m = mcp;
  2353. }
  2354. pim_input_to_daemon:
  2355. /*
  2356. * Pass the PIM message up to the daemon; if it is a Register message,
  2357. * pass the 'head' only up to the daemon. This includes the
  2358. * outer IP header, PIM header, PIM-Register header and the
  2359. * inner IP header.
2360. * XXX: the outer IP header pkt size of a Register is not adjusted to
  2361. * reflect the fact that the inner multicast data is truncated.
  2362. */
  2363. return (rip_input(&m, &off, proto));
  2364. }
  2365. static int
  2366. sysctl_mfctable(SYSCTL_HANDLER_ARGS)
  2367. {
  2368. struct mfc *rt;
  2369. int error, i;
  2370. if (req->newptr)
  2371. return (EPERM);
  2372. if (V_mfchashtbl == NULL) /* XXX unlocked */
  2373. return (0);
  2374. error = sysctl_wire_old_buffer(req, 0);
  2375. if (error)
  2376. return (error);
  2377. MFC_LOCK();
  2378. for (i = 0; i < mfchashsize; i++) {
  2379. LIST_FOREACH(rt, &V_mfchashtbl[i], mfc_hash) {
  2380. error = SYSCTL_OUT(req, rt, sizeof(struct mfc));
  2381. if (error)
  2382. goto out_locked;
  2383. }
  2384. }
  2385. out_locked:
  2386. MFC_UNLOCK();
  2387. return (error);
  2388. }
  2389. static SYSCTL_NODE(_net_inet_ip, OID_AUTO, mfctable,
  2390. CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mfctable,
  2391. "IPv4 Multicast Forwarding Table "
  2392. "(struct *mfc[mfchashsize], netinet/ip_mroute.h)");
  2393. static int
  2394. sysctl_viflist(SYSCTL_HANDLER_ARGS)
  2395. {
  2396. int error;
  2397. if (req->newptr)
  2398. return (EPERM);
  2399. if (V_viftable == NULL) /* XXX unlocked */
  2400. return (0);
  2401. error = sysctl_wire_old_buffer(req, sizeof(*V_viftable) * MAXVIFS);
  2402. if (error)
  2403. return (error);
  2404. VIF_LOCK();
  2405. error = SYSCTL_OUT(req, V_viftable, sizeof(*V_viftable) * MAXVIFS);
  2406. VIF_UNLOCK();
  2407. return (error);
  2408. }
  2409. SYSCTL_PROC(_net_inet_ip, OID_AUTO, viftable,
  2410. CTLTYPE_OPAQUE | CTLFLAG_VNET | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
  2411. sysctl_viflist, "S,vif[MAXVIFS]",
  2412. "IPv4 Multicast Interfaces (struct vif[MAXVIFS], netinet/ip_mroute.h)");
  2413. static void
  2414. vnet_mroute_init(const void *unused __unused)
  2415. {
  2416. V_nexpire = malloc(mfchashsize, M_MRTABLE, M_WAITOK|M_ZERO);
  2417. V_viftable = mallocarray(MAXVIFS, sizeof(*V_viftable),
  2418. M_MRTABLE, M_WAITOK|M_ZERO);
  2419. V_bw_meter_timers = mallocarray(BW_METER_BUCKETS,
  2420. sizeof(*V_bw_meter_timers), M_MRTABLE, M_WAITOK|M_ZERO);
  2421. V_bw_upcalls = mallocarray(BW_UPCALLS_MAX, sizeof(*V_bw_upcalls),
  2422. M_MRTABLE, M_WAITOK|M_ZERO);
  2423. callout_init(&V_expire_upcalls_ch, 1);
  2424. callout_init(&V_bw_upcalls_ch, 1);
  2425. callout_init(&V_bw_meter_ch, 1);
  2426. }
  2427. VNET_SYSINIT(vnet_mroute_init, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mroute_init,
  2428. NULL);
  2429. static void
  2430. vnet_mroute_uninit(const void *unused __unused)
  2431. {
  2432. free(V_bw_upcalls, M_MRTABLE);
  2433. free(V_bw_meter_timers, M_MRTABLE);
  2434. free(V_viftable, M_MRTABLE);
  2435. free(V_nexpire, M_MRTABLE);
  2436. V_nexpire = NULL;
  2437. }
  2438. VNET_SYSUNINIT(vnet_mroute_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE,
  2439. vnet_mroute_uninit, NULL);
  2440. static int
  2441. ip_mroute_modevent(module_t mod, int type, void *unused)
  2442. {
  2443. switch (type) {
  2444. case MOD_LOAD:
  2445. MROUTER_LOCK_INIT();
  2446. if_detach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
  2447. if_detached_event, NULL, EVENTHANDLER_PRI_ANY);
  2448. if (if_detach_event_tag == NULL) {
  2449. printf("ip_mroute: unable to register "
  2450. "ifnet_departure_event handler\n");
  2451. MROUTER_LOCK_DESTROY();
  2452. return (EINVAL);
  2453. }
  2454. MFC_LOCK_INIT();
  2455. VIF_LOCK_INIT();
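/* The MFC hash size may be overridden with the net.inet.ip.mfchashsize loader tunable; fall back to the default if the value is not a power of two. */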
  2456. mfchashsize = MFCHASHSIZE;
  2457. if (TUNABLE_ULONG_FETCH("net.inet.ip.mfchashsize", &mfchashsize) &&
  2458. !powerof2(mfchashsize)) {
  2459. printf("WARNING: %s not a power of 2; using default\n",
  2460. "net.inet.ip.mfchashsize");
  2461. mfchashsize = MFCHASHSIZE;
  2462. }
  2463. pim_squelch_wholepkt = 0;
  2464. TUNABLE_ULONG_FETCH("net.inet.pim.squelch_wholepkt",
  2465. &pim_squelch_wholepkt);
  2466. pim_encap_cookie = ip_encap_attach(&ipv4_encap_cfg, NULL, M_WAITOK);
  2467. if (pim_encap_cookie == NULL) {
  2468. printf("ip_mroute: unable to attach pim encap\n");
  2469. VIF_LOCK_DESTROY();
  2470. MFC_LOCK_DESTROY();
  2471. MROUTER_LOCK_DESTROY();
  2472. return (EINVAL);
  2473. }
  2474. ip_mcast_src = X_ip_mcast_src;
  2475. ip_mforward = X_ip_mforward;
  2476. ip_mrouter_done = X_ip_mrouter_done;
  2477. ip_mrouter_get = X_ip_mrouter_get;
  2478. ip_mrouter_set = X_ip_mrouter_set;
  2479. ip_rsvp_force_done = X_ip_rsvp_force_done;
  2480. ip_rsvp_vif = X_ip_rsvp_vif;
  2481. legal_vif_num = X_legal_vif_num;
  2482. mrt_ioctl = X_mrt_ioctl;
  2483. rsvp_input_p = X_rsvp_input;
  2484. break;
  2485. case MOD_UNLOAD:
  2486. /*
  2487. * Typically module unload happens after the user-level
  2488. * process has shutdown the kernel services (the check
2489. * below ensures someone can't just yank the module out
2490. * from under a running process). But if the module is
2491. * just loaded and then unloaded w/o starting up a user
2492. * process, we still need to clean up.
  2493. */
  2494. MROUTER_LOCK();
  2495. if (ip_mrouter_cnt != 0) {
  2496. MROUTER_UNLOCK();
  2497. return (EINVAL);
  2498. }
  2499. ip_mrouter_unloading = 1;
  2500. MROUTER_UNLOCK();
  2501. EVENTHANDLER_DEREGISTER(ifnet_departure_event, if_detach_event_tag);
  2502. if (pim_encap_cookie) {
  2503. ip_encap_detach(pim_encap_cookie);
  2504. pim_encap_cookie = NULL;
  2505. }
  2506. ip_mcast_src = NULL;
  2507. ip_mforward = NULL;
  2508. ip_mrouter_done = NULL;
  2509. ip_mrouter_get = NULL;
  2510. ip_mrouter_set = NULL;
  2511. ip_rsvp_force_done = NULL;
  2512. ip_rsvp_vif = NULL;
  2513. legal_vif_num = NULL;
  2514. mrt_ioctl = NULL;
  2515. rsvp_input_p = NULL;
  2516. VIF_LOCK_DESTROY();
  2517. MFC_LOCK_DESTROY();
  2518. MROUTER_LOCK_DESTROY();
  2519. break;
  2520. default:
  2521. return EOPNOTSUPP;
  2522. }
  2523. return 0;
  2524. }
  2525. static moduledata_t ip_mroutemod = {
  2526. "ip_mroute",
  2527. ip_mroute_modevent,
  2528. 0
  2529. };
  2530. DECLARE_MODULE(ip_mroute, ip_mroutemod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE);