if_vxlan.c

/*-
 * Copyright (c) 2014, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 * Copyright (c) 2020, Chelsio Communications.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/hash.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vxlan.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/route/nhop.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/ip_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/in_fib.h>

#include <netinet6/in6_fib.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>

struct vxlan_softc;
LIST_HEAD(vxlan_softc_head, vxlan_softc);

struct sx vxlan_sx;
SX_SYSINIT(vxlan, &vxlan_sx, "VXLAN global start/stop lock");

struct vxlan_socket_mc_info {
	union vxlan_sockaddr vxlsomc_saddr;
	union vxlan_sockaddr vxlsomc_gaddr;
	int vxlsomc_ifidx;
	int vxlsomc_users;
};
/*
 * The maximum MTU of an encapsulated Ethernet frame within an IPv4/UDP
 * packet.
 */
#define VXLAN_MAX_MTU	(IP_MAXPACKET - \
    60 /* Maximum IPv4 header len */ - \
    sizeof(struct udphdr) - \
    sizeof(struct vxlan_header) - \
    ETHER_HDR_LEN - ETHER_VLAN_ENCAP_LEN)

#define VXLAN_BASIC_IFCAPS (IFCAP_LINKSTATE | IFCAP_JUMBO_MTU)

#define VXLAN_SO_MC_MAX_GROUPS	32

#define VXLAN_SO_VNI_HASH_SHIFT	6
#define VXLAN_SO_VNI_HASH_SIZE	(1 << VXLAN_SO_VNI_HASH_SHIFT)
#define VXLAN_SO_VNI_HASH(_vni)	((_vni) % VXLAN_SO_VNI_HASH_SIZE)

struct vxlan_socket {
	struct socket *vxlso_sock;
	struct rmlock vxlso_lock;
	u_int vxlso_refcnt;
	union vxlan_sockaddr vxlso_laddr;
	LIST_ENTRY(vxlan_socket) vxlso_entry;
	struct vxlan_softc_head vxlso_vni_hash[VXLAN_SO_VNI_HASH_SIZE];
	struct vxlan_socket_mc_info vxlso_mc[VXLAN_SO_MC_MAX_GROUPS];
};

#define VXLAN_SO_RLOCK(_vso, _p)	rm_rlock(&(_vso)->vxlso_lock, (_p))
#define VXLAN_SO_RUNLOCK(_vso, _p)	rm_runlock(&(_vso)->vxlso_lock, (_p))
#define VXLAN_SO_WLOCK(_vso)		rm_wlock(&(_vso)->vxlso_lock)
#define VXLAN_SO_WUNLOCK(_vso)		rm_wunlock(&(_vso)->vxlso_lock)
#define VXLAN_SO_LOCK_ASSERT(_vso) \
    rm_assert(&(_vso)->vxlso_lock, RA_LOCKED)
#define VXLAN_SO_LOCK_WASSERT(_vso) \
    rm_assert(&(_vso)->vxlso_lock, RA_WLOCKED)

#define VXLAN_SO_ACQUIRE(_vso)	refcount_acquire(&(_vso)->vxlso_refcnt)
#define VXLAN_SO_RELEASE(_vso)	refcount_release(&(_vso)->vxlso_refcnt)

struct vxlan_ftable_entry {
	LIST_ENTRY(vxlan_ftable_entry) vxlfe_hash;
	uint16_t vxlfe_flags;
	uint8_t vxlfe_mac[ETHER_ADDR_LEN];
	union vxlan_sockaddr vxlfe_raddr;
	time_t vxlfe_expire;
};

#define VXLAN_FE_FLAG_DYNAMIC	0x01
#define VXLAN_FE_FLAG_STATIC	0x02

#define VXLAN_FE_IS_DYNAMIC(_fe) \
    ((_fe)->vxlfe_flags & VXLAN_FE_FLAG_DYNAMIC)

#define VXLAN_SC_FTABLE_SHIFT	9
#define VXLAN_SC_FTABLE_SIZE	(1 << VXLAN_SC_FTABLE_SHIFT)
#define VXLAN_SC_FTABLE_MASK	(VXLAN_SC_FTABLE_SIZE - 1)
#define VXLAN_SC_FTABLE_HASH(_sc, _mac) \
    (vxlan_mac_hash(_sc, _mac) % VXLAN_SC_FTABLE_SIZE)
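
/*
 * Note: the per-softc vxl_ftable_hash_key used by vxlan_mac_hash() is
 * seeded from arc4random() in vxlan_ftable_init(), presumably so that the
 * bucket distribution cannot be predicted from MAC addresses alone.
 */
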
LIST_HEAD(vxlan_ftable_head, vxlan_ftable_entry);

struct vxlan_statistics {
	uint32_t ftable_nospace;
	uint32_t ftable_lock_upgrade_failed;
	counter_u64_t txcsum;
	counter_u64_t tso;
	counter_u64_t rxcsum;
};

struct vxlan_softc {
	struct ifnet *vxl_ifp;
	int vxl_reqcap;
	u_int vxl_fibnum;
	struct vxlan_socket *vxl_sock;
	uint32_t vxl_vni;
	union vxlan_sockaddr vxl_src_addr;
	union vxlan_sockaddr vxl_dst_addr;
	uint32_t vxl_flags;
#define VXLAN_FLAG_INIT		0x0001
#define VXLAN_FLAG_TEARDOWN	0x0002
#define VXLAN_FLAG_LEARN	0x0004
#define VXLAN_FLAG_USER_MTU	0x0008

	uint32_t vxl_port_hash_key;
	uint16_t vxl_min_port;
	uint16_t vxl_max_port;
	uint8_t vxl_ttl;

	/* Lookup table from MAC address to forwarding entry. */
	uint32_t vxl_ftable_cnt;
	uint32_t vxl_ftable_max;
	uint32_t vxl_ftable_timeout;
	uint32_t vxl_ftable_hash_key;
	struct vxlan_ftable_head *vxl_ftable;

	/* Derived from vxl_dst_addr. */
	struct vxlan_ftable_entry vxl_default_fe;

	struct ip_moptions *vxl_im4o;
	struct ip6_moptions *vxl_im6o;

	struct rmlock vxl_lock;
	volatile u_int vxl_refcnt;

	int vxl_unit;
	int vxl_vso_mc_index;
	struct vxlan_statistics vxl_stats;
	struct sysctl_oid *vxl_sysctl_node;
	struct sysctl_ctx_list vxl_sysctl_ctx;
	struct callout vxl_callout;
	struct ether_addr vxl_hwaddr;
	int vxl_mc_ifindex;
	struct ifnet *vxl_mc_ifp;
	struct ifmedia vxl_media;
	char vxl_mc_ifname[IFNAMSIZ];
	LIST_ENTRY(vxlan_softc) vxl_entry;
	LIST_ENTRY(vxlan_softc) vxl_ifdetach_list;

	/* For rate limiting errors on the tx fast path. */
	struct timeval err_time;
	int err_pps;
};

#define VXLAN_RLOCK(_sc, _p)	rm_rlock(&(_sc)->vxl_lock, (_p))
#define VXLAN_RUNLOCK(_sc, _p)	rm_runlock(&(_sc)->vxl_lock, (_p))
#define VXLAN_WLOCK(_sc)	rm_wlock(&(_sc)->vxl_lock)
#define VXLAN_WUNLOCK(_sc)	rm_wunlock(&(_sc)->vxl_lock)
#define VXLAN_LOCK_WOWNED(_sc)	rm_wowned(&(_sc)->vxl_lock)
#define VXLAN_LOCK_ASSERT(_sc)	rm_assert(&(_sc)->vxl_lock, RA_LOCKED)
#define VXLAN_LOCK_WASSERT(_sc)	rm_assert(&(_sc)->vxl_lock, RA_WLOCKED)
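
/*
 * VXLAN_UNLOCK() releases whichever flavor of the softc rmlock the caller
 * holds: the write lock if it is write-owned, otherwise the read lock
 * tracked by the caller's priotracker.
 */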
#define VXLAN_UNLOCK(_sc, _p) do {	\
	if (VXLAN_LOCK_WOWNED(_sc))	\
		VXLAN_WUNLOCK(_sc);	\
	else				\
		VXLAN_RUNLOCK(_sc, _p);	\
} while (0)

#define VXLAN_ACQUIRE(_sc)	refcount_acquire(&(_sc)->vxl_refcnt)
#define VXLAN_RELEASE(_sc)	refcount_release(&(_sc)->vxl_refcnt)

#define satoconstsin(sa)	((const struct sockaddr_in *)(sa))
#define satoconstsin6(sa)	((const struct sockaddr_in6 *)(sa))

struct vxlanudphdr {
	struct udphdr vxlh_udp;
	struct vxlan_header vxlh_hdr;
} __packed;

static int	vxlan_ftable_addr_cmp(const uint8_t *, const uint8_t *);
static void	vxlan_ftable_init(struct vxlan_softc *);
static void	vxlan_ftable_fini(struct vxlan_softc *);
static void	vxlan_ftable_flush(struct vxlan_softc *, int);
static void	vxlan_ftable_expire(struct vxlan_softc *);
static int	vxlan_ftable_update_locked(struct vxlan_softc *,
		    const union vxlan_sockaddr *, const uint8_t *,
		    struct rm_priotracker *);
static int	vxlan_ftable_learn(struct vxlan_softc *,
		    const struct sockaddr *, const uint8_t *);
static int	vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS);
static struct vxlan_ftable_entry *
		vxlan_ftable_entry_alloc(void);
static void	vxlan_ftable_entry_free(struct vxlan_ftable_entry *);
static void	vxlan_ftable_entry_init(struct vxlan_softc *,
		    struct vxlan_ftable_entry *, const uint8_t *,
		    const struct sockaddr *, uint32_t);
static void	vxlan_ftable_entry_destroy(struct vxlan_softc *,
		    struct vxlan_ftable_entry *);
static int	vxlan_ftable_entry_insert(struct vxlan_softc *,
		    struct vxlan_ftable_entry *);
static struct vxlan_ftable_entry *
		vxlan_ftable_entry_lookup(struct vxlan_softc *,
		    const uint8_t *);
static void	vxlan_ftable_entry_dump(struct vxlan_ftable_entry *,
		    struct sbuf *);
static struct vxlan_socket *
		vxlan_socket_alloc(const union vxlan_sockaddr *);
static void	vxlan_socket_destroy(struct vxlan_socket *);
static void	vxlan_socket_release(struct vxlan_socket *);
static struct vxlan_socket *
		vxlan_socket_lookup(union vxlan_sockaddr *vxlsa);
static void	vxlan_socket_insert(struct vxlan_socket *);
static int	vxlan_socket_init(struct vxlan_socket *, struct ifnet *);
static int	vxlan_socket_bind(struct vxlan_socket *, struct ifnet *);
static int	vxlan_socket_create(struct ifnet *, int,
		    const union vxlan_sockaddr *, struct vxlan_socket **);
static void	vxlan_socket_ifdetach(struct vxlan_socket *,
		    struct ifnet *, struct vxlan_softc_head *);
static struct vxlan_socket *
		vxlan_socket_mc_lookup(const union vxlan_sockaddr *);
static int	vxlan_sockaddr_mc_info_match(
		    const struct vxlan_socket_mc_info *,
		    const union vxlan_sockaddr *,
		    const union vxlan_sockaddr *, int);
static int	vxlan_socket_mc_join_group(struct vxlan_socket *,
		    const union vxlan_sockaddr *, const union vxlan_sockaddr *,
		    int *, union vxlan_sockaddr *);
static int	vxlan_socket_mc_leave_group(struct vxlan_socket *,
		    const union vxlan_sockaddr *,
		    const union vxlan_sockaddr *, int);
static int	vxlan_socket_mc_add_group(struct vxlan_socket *,
		    const union vxlan_sockaddr *, const union vxlan_sockaddr *,
		    int, int *);
static void	vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *,
		    int);
static struct vxlan_softc *
		vxlan_socket_lookup_softc_locked(struct vxlan_socket *,
		    uint32_t);
static struct vxlan_softc *
		vxlan_socket_lookup_softc(struct vxlan_socket *, uint32_t);
static int	vxlan_socket_insert_softc(struct vxlan_socket *,
		    struct vxlan_softc *);
static void	vxlan_socket_remove_softc(struct vxlan_socket *,
		    struct vxlan_softc *);
static struct ifnet *
		vxlan_multicast_if_ref(struct vxlan_softc *, int);
static void	vxlan_free_multicast(struct vxlan_softc *);
static int	vxlan_setup_multicast_interface(struct vxlan_softc *);
static int	vxlan_setup_multicast(struct vxlan_softc *);
static int	vxlan_setup_socket(struct vxlan_softc *);
#ifdef INET6
static void	vxlan_setup_zero_checksum_port(struct vxlan_softc *);
#endif
static void	vxlan_setup_interface_hdrlen(struct vxlan_softc *);
static int	vxlan_valid_init_config(struct vxlan_softc *);
static void	vxlan_init_wait(struct vxlan_softc *);
static void	vxlan_init_complete(struct vxlan_softc *);
static void	vxlan_init(void *);
static void	vxlan_release(struct vxlan_softc *);
static void	vxlan_teardown_wait(struct vxlan_softc *);
static void	vxlan_teardown_complete(struct vxlan_softc *);
static void	vxlan_teardown_locked(struct vxlan_softc *);
static void	vxlan_teardown(struct vxlan_softc *);
static void	vxlan_ifdetach(struct vxlan_softc *, struct ifnet *,
		    struct vxlan_softc_head *);
static void	vxlan_timer(void *);
static int	vxlan_ctrl_get_config(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_vni(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_local_addr(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_remote_addr(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_local_port(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_remote_port(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_port_range(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_ftable_max(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_multicast_if(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_ttl(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_learn(struct vxlan_softc *, void *);
static int	vxlan_ctrl_ftable_entry_add(struct vxlan_softc *, void *);
static int	vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *, void *);
static int	vxlan_ctrl_flush(struct vxlan_softc *, void *);
static int	vxlan_ioctl_drvspec(struct vxlan_softc *,
		    struct ifdrv *, int);
static int	vxlan_ioctl_ifflags(struct vxlan_softc *);
static int	vxlan_ioctl(struct ifnet *, u_long, caddr_t);
#if defined(INET) || defined(INET6)
static uint16_t vxlan_pick_source_port(struct vxlan_softc *, struct mbuf *);
static void	vxlan_encap_header(struct vxlan_softc *, struct mbuf *,
		    int, uint16_t, uint16_t);
#endif
static int	vxlan_encap4(struct vxlan_softc *,
		    const union vxlan_sockaddr *, struct mbuf *);
static int	vxlan_encap6(struct vxlan_softc *,
		    const union vxlan_sockaddr *, struct mbuf *);
static int	vxlan_transmit(struct ifnet *, struct mbuf *);
static void	vxlan_qflush(struct ifnet *);
static bool	vxlan_rcv_udp_packet(struct mbuf *, int, struct inpcb *,
		    const struct sockaddr *, void *);
static int	vxlan_input(struct vxlan_socket *, uint32_t, struct mbuf **,
		    const struct sockaddr *);
static void	vxlan_stats_alloc(struct vxlan_softc *);
static void	vxlan_stats_free(struct vxlan_softc *);
static void	vxlan_set_default_config(struct vxlan_softc *);
static int	vxlan_set_user_config(struct vxlan_softc *,
		    struct ifvxlanparam *);
static int	vxlan_set_reqcap(struct vxlan_softc *, struct ifnet *, int);
static void	vxlan_set_hwcaps(struct vxlan_softc *);
static int	vxlan_clone_create(struct if_clone *, char *, size_t,
		    struct ifc_data *, struct ifnet **);
static int	vxlan_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
static uint32_t vxlan_mac_hash(struct vxlan_softc *, const uint8_t *);
static int	vxlan_media_change(struct ifnet *);
static void	vxlan_media_status(struct ifnet *, struct ifmediareq *);
static int	vxlan_sockaddr_cmp(const union vxlan_sockaddr *,
		    const struct sockaddr *);
static void	vxlan_sockaddr_copy(union vxlan_sockaddr *,
		    const struct sockaddr *);
static int	vxlan_sockaddr_in_equal(const union vxlan_sockaddr *,
		    const struct sockaddr *);
static void	vxlan_sockaddr_in_copy(union vxlan_sockaddr *,
		    const struct sockaddr *);
static int	vxlan_sockaddr_supported(const union vxlan_sockaddr *, int);
static int	vxlan_sockaddr_in_any(const union vxlan_sockaddr *);
static int	vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *);
static int	vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *);
static int	vxlan_can_change_config(struct vxlan_softc *);
static int	vxlan_check_vni(uint32_t);
static int	vxlan_check_ttl(int);
static int	vxlan_check_ftable_timeout(uint32_t);
static int	vxlan_check_ftable_max(uint32_t);
static void	vxlan_sysctl_setup(struct vxlan_softc *);
static void	vxlan_sysctl_destroy(struct vxlan_softc *);
static int	vxlan_tunable_int(struct vxlan_softc *, const char *, int);
static void	vxlan_ifdetach_event(void *, struct ifnet *);
static void	vxlan_load(void);
static void	vxlan_unload(void);
static int	vxlan_modevent(module_t, int, void *);

static const char vxlan_name[] = "vxlan";
static MALLOC_DEFINE(M_VXLAN, vxlan_name,
    "Virtual eXtensible LAN Interface");
static struct if_clone *vxlan_cloner;

static struct mtx vxlan_list_mtx;
#define VXLAN_LIST_LOCK()	mtx_lock(&vxlan_list_mtx)
#define VXLAN_LIST_UNLOCK()	mtx_unlock(&vxlan_list_mtx)

static LIST_HEAD(, vxlan_socket) vxlan_socket_list;

static eventhandler_tag vxlan_ifdetach_event_tag;

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, vxlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Virtual eXtensible Local Area Network");

static int vxlan_legacy_port = 0;
TUNABLE_INT("net.link.vxlan.legacy_port", &vxlan_legacy_port);
static int vxlan_reuse_port = 0;
TUNABLE_INT("net.link.vxlan.reuse_port", &vxlan_reuse_port);
/*
 * This macro controls the default upper limit on the nesting of vxlan
 * tunnels. By default it is 3, since the overhead of an IPv6 vxlan tunnel
 * is 70 bytes: three levels add at most 210 bytes of overhead, leaving the
 * innermost tunnel an MTU of 1290, which still meets the IPv6 minimum MTU
 * of 1280. Be careful when configuring tunnels after raising the limit; a
 * large number of nested tunnels can crash the system.
 */
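/*
 * For reference, the 70-byte per-level figure above corresponds to
 * ETHER_HDR_LEN (14) + an IPv6 header (40) + a UDP header (8) + the
 * 8-byte VXLAN header.
 */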
#ifndef MAX_VXLAN_NEST
#define MAX_VXLAN_NEST	3
#endif
static int max_vxlan_nesting = MAX_VXLAN_NEST;
SYSCTL_INT(_net_link_vxlan, OID_AUTO, max_nesting, CTLFLAG_RW,
    &max_vxlan_nesting, 0, "Max nested tunnels");

/* Default maximum number of addresses in the forwarding table. */
#ifndef VXLAN_FTABLE_MAX
#define VXLAN_FTABLE_MAX	2000
#endif

/* Timeout (in seconds) of addresses learned in the forwarding table. */
#ifndef VXLAN_FTABLE_TIMEOUT
#define VXLAN_FTABLE_TIMEOUT	(20 * 60)
#endif

/*
 * Maximum timeout (in seconds) of addresses learned in the forwarding
 * table.
 */
#ifndef VXLAN_FTABLE_MAX_TIMEOUT
#define VXLAN_FTABLE_MAX_TIMEOUT	(60 * 60 * 24)
#endif

/* Number of seconds between pruning attempts of the forwarding table. */
#ifndef VXLAN_FTABLE_PRUNE
#define VXLAN_FTABLE_PRUNE	(5 * 60)
#endif

static int vxlan_ftable_prune_period = VXLAN_FTABLE_PRUNE;

struct vxlan_control {
	int	(*vxlc_func)(struct vxlan_softc *, void *);
	int	vxlc_argsize;
	int	vxlc_flags;
#define VXLAN_CTRL_FLAG_COPYIN	0x01
#define VXLAN_CTRL_FLAG_COPYOUT	0x02
#define VXLAN_CTRL_FLAG_SUSER	0x04
};
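
/*
 * Dispatch table for the driver-specific ioctl commands, indexed by the
 * VXLAN_CMD_* value. The flags select whether the argument is copied in
 * and/or out and whether the caller must hold privilege
 * (VXLAN_CTRL_FLAG_SUSER).
 */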
static const struct vxlan_control vxlan_control_table[] = {
	[VXLAN_CMD_GET_CONFIG] =
	    { vxlan_ctrl_get_config, sizeof(struct ifvxlancfg),
	      VXLAN_CTRL_FLAG_COPYOUT
	    },

	[VXLAN_CMD_SET_VNI] =
	    { vxlan_ctrl_set_vni, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_LOCAL_ADDR] =
	    { vxlan_ctrl_set_local_addr, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_REMOTE_ADDR] =
	    { vxlan_ctrl_set_remote_addr, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_LOCAL_PORT] =
	    { vxlan_ctrl_set_local_port, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_REMOTE_PORT] =
	    { vxlan_ctrl_set_remote_port, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_PORT_RANGE] =
	    { vxlan_ctrl_set_port_range, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_FTABLE_TIMEOUT] =
	    { vxlan_ctrl_set_ftable_timeout, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_FTABLE_MAX] =
	    { vxlan_ctrl_set_ftable_max, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_MULTICAST_IF] =
	    { vxlan_ctrl_set_multicast_if, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_TTL] =
	    { vxlan_ctrl_set_ttl, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_LEARN] =
	    { vxlan_ctrl_set_learn, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_FTABLE_ENTRY_ADD] =
	    { vxlan_ctrl_ftable_entry_add, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_FTABLE_ENTRY_REM] =
	    { vxlan_ctrl_ftable_entry_rem, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_FLUSH] =
	    { vxlan_ctrl_flush, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },
};
static const int vxlan_control_table_size = nitems(vxlan_control_table);

static int
vxlan_ftable_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int i, d;

	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++)
		d = ((int)a[i]) - ((int)b[i]);

	return (d);
}

static void
vxlan_ftable_init(struct vxlan_softc *sc)
{
	int i;

	sc->vxl_ftable = malloc(sizeof(struct vxlan_ftable_head) *
	    VXLAN_SC_FTABLE_SIZE, M_VXLAN, M_ZERO | M_WAITOK);

	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++)
		LIST_INIT(&sc->vxl_ftable[i]);
	sc->vxl_ftable_hash_key = arc4random();
}

static void
vxlan_ftable_fini(struct vxlan_softc *sc)
{
	int i;

	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
		KASSERT(LIST_EMPTY(&sc->vxl_ftable[i]),
		    ("%s: vxlan %p ftable[%d] not empty", __func__, sc, i));
	}
	MPASS(sc->vxl_ftable_cnt == 0);

	free(sc->vxl_ftable, M_VXLAN);
	sc->vxl_ftable = NULL;
}

static void
vxlan_ftable_flush(struct vxlan_softc *sc, int all)
{
	struct vxlan_ftable_entry *fe, *tfe;
	int i;

	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
			if (all || VXLAN_FE_IS_DYNAMIC(fe))
				vxlan_ftable_entry_destroy(sc, fe);
		}
	}
}

static void
vxlan_ftable_expire(struct vxlan_softc *sc)
{
	struct vxlan_ftable_entry *fe, *tfe;
	int i;

	VXLAN_LOCK_WASSERT(sc);

	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
			if (VXLAN_FE_IS_DYNAMIC(fe) &&
			    time_uptime >= fe->vxlfe_expire)
				vxlan_ftable_entry_destroy(sc, fe);
		}
	}
}

static int
vxlan_ftable_update_locked(struct vxlan_softc *sc,
    const union vxlan_sockaddr *vxlsa, const uint8_t *mac,
    struct rm_priotracker *tracker)
{
	struct vxlan_ftable_entry *fe;
	int error __unused;

	VXLAN_LOCK_ASSERT(sc);

again:
	/*
	 * A forwarding entry for this MAC address might already exist. If
	 * so, update it, otherwise create a new one. We may have to upgrade
	 * the lock if we have to change or create an entry.
	 */
	fe = vxlan_ftable_entry_lookup(sc, mac);
	if (fe != NULL) {
		fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;

		if (!VXLAN_FE_IS_DYNAMIC(fe) ||
		    vxlan_sockaddr_in_equal(&fe->vxlfe_raddr, &vxlsa->sa))
			return (0);
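		/*
		 * An rmlock cannot be upgraded in place: drop the read
		 * lock, take the write lock, and redo the lookup since
		 * the table may have changed in between.
		 */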
		if (!VXLAN_LOCK_WOWNED(sc)) {
			VXLAN_RUNLOCK(sc, tracker);
			VXLAN_WLOCK(sc);
			sc->vxl_stats.ftable_lock_upgrade_failed++;
			goto again;
		}
		vxlan_sockaddr_in_copy(&fe->vxlfe_raddr, &vxlsa->sa);
		return (0);
	}

	if (!VXLAN_LOCK_WOWNED(sc)) {
		VXLAN_RUNLOCK(sc, tracker);
		VXLAN_WLOCK(sc);
		sc->vxl_stats.ftable_lock_upgrade_failed++;
		goto again;
	}

	if (sc->vxl_ftable_cnt >= sc->vxl_ftable_max) {
		sc->vxl_stats.ftable_nospace++;
		return (ENOSPC);
	}

	fe = vxlan_ftable_entry_alloc();
	if (fe == NULL)
		return (ENOMEM);

	vxlan_ftable_entry_init(sc, fe, mac, &vxlsa->sa, VXLAN_FE_FLAG_DYNAMIC);

	/* The prior lookup failed, so the insert should not. */
	error = vxlan_ftable_entry_insert(sc, fe);
	MPASS(error == 0);

	return (0);
}

static int
vxlan_ftable_learn(struct vxlan_softc *sc, const struct sockaddr *sa,
    const uint8_t *mac)
{
	struct rm_priotracker tracker;
	union vxlan_sockaddr vxlsa;
	int error;

	/*
	 * The source port may be randomly selected by the remote host, so
	 * use the port of the default destination address.
	 */
	vxlan_sockaddr_copy(&vxlsa, sa);
	vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;

	if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
		error = vxlan_sockaddr_in6_embedscope(&vxlsa);
		if (error)
			return (error);
	}

	VXLAN_RLOCK(sc, &tracker);
	error = vxlan_ftable_update_locked(sc, &vxlsa, mac, &tracker);
	VXLAN_UNLOCK(sc, &tracker);

	return (error);
}

static int
vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sbuf sb;
	struct vxlan_softc *sc;
	struct vxlan_ftable_entry *fe;
	size_t size;
	int i, error;

	/*
	 * This is mostly intended for debugging during development. It is
	 * not practical to dump an entire large table this way.
	 */

	sc = arg1;
	size = PAGE_SIZE;	/* Calculate later. */

	sbuf_new(&sb, NULL, size, SBUF_FIXEDLEN);
	sbuf_putc(&sb, '\n');

	VXLAN_RLOCK(sc, &tracker);
	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
		LIST_FOREACH(fe, &sc->vxl_ftable[i], vxlfe_hash) {
			if (sbuf_error(&sb) != 0)
				break;
			vxlan_ftable_entry_dump(fe, &sb);
		}
	}
	VXLAN_RUNLOCK(sc, &tracker);

	if (sbuf_len(&sb) == 1)
		sbuf_setpos(&sb, 0);

	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);

	return (error);
}

static struct vxlan_ftable_entry *
vxlan_ftable_entry_alloc(void)
{
	struct vxlan_ftable_entry *fe;

	fe = malloc(sizeof(*fe), M_VXLAN, M_ZERO | M_NOWAIT);

	return (fe);
}

static void
vxlan_ftable_entry_free(struct vxlan_ftable_entry *fe)
{
	free(fe, M_VXLAN);
}

static void
vxlan_ftable_entry_init(struct vxlan_softc *sc, struct vxlan_ftable_entry *fe,
    const uint8_t *mac, const struct sockaddr *sa, uint32_t flags)
{
	fe->vxlfe_flags = flags;
	fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
	memcpy(fe->vxlfe_mac, mac, ETHER_ADDR_LEN);
	vxlan_sockaddr_copy(&fe->vxlfe_raddr, sa);
}

static void
vxlan_ftable_entry_destroy(struct vxlan_softc *sc,
    struct vxlan_ftable_entry *fe)
{
	sc->vxl_ftable_cnt--;
	LIST_REMOVE(fe, vxlfe_hash);
	vxlan_ftable_entry_free(fe);
}

static int
vxlan_ftable_entry_insert(struct vxlan_softc *sc,
    struct vxlan_ftable_entry *fe)
{
	struct vxlan_ftable_entry *lfe;
	uint32_t hash;
	int dir;

	VXLAN_LOCK_WASSERT(sc);

	hash = VXLAN_SC_FTABLE_HASH(sc, fe->vxlfe_mac);
	lfe = LIST_FIRST(&sc->vxl_ftable[hash]);
	if (lfe == NULL) {
		LIST_INSERT_HEAD(&sc->vxl_ftable[hash], fe, vxlfe_hash);
		goto out;
	}
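	/*
	 * Each bucket is kept sorted in descending MAC order, which lets
	 * vxlan_ftable_entry_lookup() stop early once it sees a smaller
	 * address.
	 */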
	do {
		dir = vxlan_ftable_addr_cmp(fe->vxlfe_mac, lfe->vxlfe_mac);
		if (dir == 0)
			return (EEXIST);
		if (dir > 0) {
			LIST_INSERT_BEFORE(lfe, fe, vxlfe_hash);
			goto out;
		} else if (LIST_NEXT(lfe, vxlfe_hash) == NULL) {
			LIST_INSERT_AFTER(lfe, fe, vxlfe_hash);
			goto out;
		} else
			lfe = LIST_NEXT(lfe, vxlfe_hash);
	} while (lfe != NULL);

out:
	sc->vxl_ftable_cnt++;

	return (0);
}

static struct vxlan_ftable_entry *
vxlan_ftable_entry_lookup(struct vxlan_softc *sc, const uint8_t *mac)
{
	struct vxlan_ftable_entry *fe;
	uint32_t hash;
	int dir;

	VXLAN_LOCK_ASSERT(sc);

	hash = VXLAN_SC_FTABLE_HASH(sc, mac);
	LIST_FOREACH(fe, &sc->vxl_ftable[hash], vxlfe_hash) {
		dir = vxlan_ftable_addr_cmp(mac, fe->vxlfe_mac);
		if (dir == 0)
			return (fe);
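		/* Sorted bucket: the address cannot appear further down. */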
		if (dir > 0)
			break;
	}

	return (NULL);
}

static void
vxlan_ftable_entry_dump(struct vxlan_ftable_entry *fe, struct sbuf *sb)
{
	char buf[64];
	const union vxlan_sockaddr *sa;
	const void *addr;
	int i, len, af, width;

	sa = &fe->vxlfe_raddr;
	af = sa->sa.sa_family;
	len = sbuf_len(sb);

	sbuf_printf(sb, "%c 0x%02X ", VXLAN_FE_IS_DYNAMIC(fe) ? 'D' : 'S',
	    fe->vxlfe_flags);

	for (i = 0; i < ETHER_ADDR_LEN - 1; i++)
		sbuf_printf(sb, "%02X:", fe->vxlfe_mac[i]);
	sbuf_printf(sb, "%02X ", fe->vxlfe_mac[i]);

	if (af == AF_INET) {
		addr = &sa->in4.sin_addr;
		width = INET_ADDRSTRLEN - 1;
	} else {
		addr = &sa->in6.sin6_addr;
		width = INET6_ADDRSTRLEN - 1;
	}
	inet_ntop(af, addr, buf, sizeof(buf));
	sbuf_printf(sb, "%*s ", width, buf);

	sbuf_printf(sb, "%08jd", (intmax_t)fe->vxlfe_expire);

	sbuf_putc(sb, '\n');

	/* Truncate a partial line. */
	if (sbuf_error(sb) != 0)
		sbuf_setpos(sb, len);
}

static struct vxlan_socket *
vxlan_socket_alloc(const union vxlan_sockaddr *sa)
{
	struct vxlan_socket *vso;
	int i;

	vso = malloc(sizeof(*vso), M_VXLAN, M_WAITOK | M_ZERO);
	rm_init(&vso->vxlso_lock, "vxlansorm");
	refcount_init(&vso->vxlso_refcnt, 0);
	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++)
		LIST_INIT(&vso->vxlso_vni_hash[i]);
	vso->vxlso_laddr = *sa;

	return (vso);
}

static void
vxlan_socket_destroy(struct vxlan_socket *vso)
{
	struct socket *so;
#ifdef INVARIANTS
	int i;
	struct vxlan_socket_mc_info *mc;

	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
		mc = &vso->vxlso_mc[i];
		KASSERT(mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC,
		    ("%s: socket %p mc[%d] still has address",
		    __func__, vso, i));
	}

	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
		KASSERT(LIST_EMPTY(&vso->vxlso_vni_hash[i]),
		    ("%s: socket %p vni_hash[%d] not empty",
		    __func__, vso, i));
	}
#endif
	so = vso->vxlso_sock;
	if (so != NULL) {
		vso->vxlso_sock = NULL;
		soclose(so);
	}

	rm_destroy(&vso->vxlso_lock);
	free(vso, M_VXLAN);
}

static void
vxlan_socket_release(struct vxlan_socket *vso)
{
	int destroy;

	VXLAN_LIST_LOCK();
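	/*
	 * VXLAN_SO_RELEASE() wraps refcount_release(9), which returns
	 * non-zero only when the last reference has been dropped.
	 */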
	destroy = VXLAN_SO_RELEASE(vso);
	if (destroy != 0)
		LIST_REMOVE(vso, vxlso_entry);
	VXLAN_LIST_UNLOCK();

	if (destroy != 0)
		vxlan_socket_destroy(vso);
}

static struct vxlan_socket *
vxlan_socket_lookup(union vxlan_sockaddr *vxlsa)
{
	struct vxlan_socket *vso;

	VXLAN_LIST_LOCK();
	LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry) {
		if (vxlan_sockaddr_cmp(&vso->vxlso_laddr, &vxlsa->sa) == 0) {
			VXLAN_SO_ACQUIRE(vso);
			break;
		}
	}
	VXLAN_LIST_UNLOCK();

	return (vso);
}

static void
vxlan_socket_insert(struct vxlan_socket *vso)
{
	VXLAN_LIST_LOCK();
	VXLAN_SO_ACQUIRE(vso);
	LIST_INSERT_HEAD(&vxlan_socket_list, vso, vxlso_entry);
	VXLAN_LIST_UNLOCK();
}

static int
vxlan_socket_init(struct vxlan_socket *vso, struct ifnet *ifp)
{
	struct thread *td;
	int error;

	td = curthread;

	error = socreate(vso->vxlso_laddr.sa.sa_family, &vso->vxlso_sock,
	    SOCK_DGRAM, IPPROTO_UDP, td->td_ucred, td);
	if (error) {
		if_printf(ifp, "cannot create socket: %d\n", error);
		return (error);
	}

	error = udp_set_kernel_tunneling(vso->vxlso_sock,
	    vxlan_rcv_udp_packet, NULL, vso);
	if (error) {
		if_printf(ifp, "cannot set tunneling function: %d\n", error);
		return (error);
	}

	if (vxlan_reuse_port != 0) {
		struct sockopt sopt;
		int val = 1;

		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = SO_REUSEPORT;
		sopt.sopt_val = &val;
		sopt.sopt_valsize = sizeof(val);
		error = sosetopt(vso->vxlso_sock, &sopt);
		if (error) {
			if_printf(ifp,
			    "cannot set REUSEADDR socket opt: %d\n", error);
			return (error);
		}
	}

	return (0);
}

static int
vxlan_socket_bind(struct vxlan_socket *vso, struct ifnet *ifp)
{
	union vxlan_sockaddr laddr;
	struct thread *td;
	int error;

	td = curthread;
	laddr = vso->vxlso_laddr;

	error = sobind(vso->vxlso_sock, &laddr.sa, td);
	if (error) {
		if (error != EADDRINUSE)
			if_printf(ifp, "cannot bind socket: %d\n", error);
		return (error);
	}

	return (0);
}

static int
vxlan_socket_create(struct ifnet *ifp, int multicast,
    const union vxlan_sockaddr *saddr, struct vxlan_socket **vsop)
{
	union vxlan_sockaddr laddr;
	struct vxlan_socket *vso;
	int error;

	laddr = *saddr;

	/*
	 * If this socket will be multicast, then only the local port
	 * must be specified when binding.
	 */
	if (multicast != 0) {
		if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
			laddr.in4.sin_addr.s_addr = INADDR_ANY;
#ifdef INET6
		else
			laddr.in6.sin6_addr = in6addr_any;
#endif
	}

	vso = vxlan_socket_alloc(&laddr);
	if (vso == NULL)
		return (ENOMEM);

	error = vxlan_socket_init(vso, ifp);
	if (error)
		goto fail;

	error = vxlan_socket_bind(vso, ifp);
	if (error)
		goto fail;

	/*
	 * There is a small window between the bind completing and
	 * inserting the socket, so that a concurrent create may fail.
	 * Let's not worry about that for now.
	 */
	vxlan_socket_insert(vso);
	*vsop = vso;

	return (0);

fail:
	vxlan_socket_destroy(vso);

	return (error);
}

static void
vxlan_socket_ifdetach(struct vxlan_socket *vso, struct ifnet *ifp,
    struct vxlan_softc_head *list)
{
	struct rm_priotracker tracker;
	struct vxlan_softc *sc;
	int i;

	VXLAN_SO_RLOCK(vso, &tracker);
	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
		LIST_FOREACH(sc, &vso->vxlso_vni_hash[i], vxl_entry)
			vxlan_ifdetach(sc, ifp, list);
	}
	VXLAN_SO_RUNLOCK(vso, &tracker);
}

static struct vxlan_socket *
vxlan_socket_mc_lookup(const union vxlan_sockaddr *vxlsa)
{
	union vxlan_sockaddr laddr;
	struct vxlan_socket *vso;

	laddr = *vxlsa;
	if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
		laddr.in4.sin_addr.s_addr = INADDR_ANY;
#ifdef INET6
	else
		laddr.in6.sin6_addr = in6addr_any;
#endif

	vso = vxlan_socket_lookup(&laddr);

	return (vso);
}

static int
vxlan_sockaddr_mc_info_match(const struct vxlan_socket_mc_info *mc,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
    int ifidx)
{
	if (!vxlan_sockaddr_in_any(local) &&
	    !vxlan_sockaddr_in_equal(&mc->vxlsomc_saddr, &local->sa))
		return (0);
	if (!vxlan_sockaddr_in_equal(&mc->vxlsomc_gaddr, &group->sa))
		return (0);
	if (ifidx != 0 && ifidx != mc->vxlsomc_ifidx)
		return (0);

	return (1);
}

static int
vxlan_socket_mc_join_group(struct vxlan_socket *vso,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
    int *ifidx, union vxlan_sockaddr *source)
{
	struct sockopt sopt;
	int error;

	*source = *local;

	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
		struct ip_mreq mreq;

		mreq.imr_multiaddr = group->in4.sin_addr;
		mreq.imr_interface = local->in4.sin_addr;

		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_ADD_MEMBERSHIP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);
		if (error)
			return (error);

		/*
		 * BMV: Ideally, there would be a formal way for us to get
		 * the local interface that was selected based on the
		 * imr_interface address. We could then update *ifidx so
		 * vxlan_sockaddr_mc_info_match() would return a match for
		 * later creates that explicitly set the multicast interface.
		 *
		 * If we really need to, we can of course look in the INP's
		 * membership list:
		 *     sotoinpcb(vso->vxlso_sock)->inp_moptions->
		 *         imo_head[]->imf_inm->inm_ifp
		 * similarly to imo_match_group().
		 */
		source->in4.sin_addr = local->in4.sin_addr;
	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
		struct ipv6_mreq mreq;

		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
		mreq.ipv6mr_interface = *ifidx;

		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_JOIN_GROUP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);
		if (error)
			return (error);

		/*
		 * BMV: As with IPv4, we would really like to know what
		 * interface in6p_lookup_mcast_ifp() selected.
		 */
	} else
		error = EAFNOSUPPORT;

	return (error);
}

static int
vxlan_socket_mc_leave_group(struct vxlan_socket *vso,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *source,
    int ifidx)
{
	struct sockopt sopt;
	int error;

	bzero(&sopt, sizeof(sopt));
	sopt.sopt_dir = SOPT_SET;

	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
		struct ip_mreq mreq;

		mreq.imr_multiaddr = group->in4.sin_addr;
		mreq.imr_interface = source->in4.sin_addr;

		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_DROP_MEMBERSHIP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);
	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
		struct ipv6_mreq mreq;

		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
		mreq.ipv6mr_interface = ifidx;

		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_LEAVE_GROUP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);
	} else
		error = EAFNOSUPPORT;

	return (error);
}

static int
vxlan_socket_mc_add_group(struct vxlan_socket *vso,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
    int ifidx, int *idx)
{
	union vxlan_sockaddr source;
	struct vxlan_socket_mc_info *mc;
	int i, empty, error;

	/*
	 * Within a socket, the same multicast group may be used by multiple
	 * interfaces, each with a different network identifier. But a socket
	 * may only join a multicast group once, so keep track of the users
	 * here.
	 */

	VXLAN_SO_WLOCK(vso);
	for (empty = 0, i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
		mc = &vso->vxlso_mc[i];

		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
			empty++;
			continue;
		}

		if (vxlan_sockaddr_mc_info_match(mc, group, local, ifidx))
			goto out;
	}
	VXLAN_SO_WUNLOCK(vso);

	if (empty == 0)
		return (ENOSPC);

	error = vxlan_socket_mc_join_group(vso, group, local, &ifidx, &source);
	if (error)
		return (error);

	VXLAN_SO_WLOCK(vso);
	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
		mc = &vso->vxlso_mc[i];

		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
			vxlan_sockaddr_copy(&mc->vxlsomc_gaddr, &group->sa);
			vxlan_sockaddr_copy(&mc->vxlsomc_saddr, &source.sa);
			mc->vxlsomc_ifidx = ifidx;
			goto out;
		}
	}
	VXLAN_SO_WUNLOCK(vso);
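	/*
	 * Every slot was claimed while the lock was dropped for the join
	 * above; undo the membership we just added and report no space.
	 */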
	error = vxlan_socket_mc_leave_group(vso, group, &source, ifidx);
	MPASS(error == 0);

	return (ENOSPC);

out:
	mc->vxlsomc_users++;
	VXLAN_SO_WUNLOCK(vso);

	*idx = i;

	return (0);
}

static void
vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *vso, int idx)
{
	union vxlan_sockaddr group, source;
	struct vxlan_socket_mc_info *mc;
	int ifidx, leave;

	KASSERT(idx >= 0 && idx < VXLAN_SO_MC_MAX_GROUPS,
	    ("%s: vso %p idx %d out of bounds", __func__, vso, idx));

	leave = 0;
	mc = &vso->vxlso_mc[idx];

	VXLAN_SO_WLOCK(vso);
	mc->vxlsomc_users--;
	if (mc->vxlsomc_users == 0) {
		group = mc->vxlsomc_gaddr;
		source = mc->vxlsomc_saddr;
		ifidx = mc->vxlsomc_ifidx;
		bzero(mc, sizeof(*mc));
		leave = 1;
	}
	VXLAN_SO_WUNLOCK(vso);

	if (leave != 0) {
		/*
		 * Our socket's membership in this group may have already
		 * been removed if we joined through an interface that's
		 * been detached.
		 */
		vxlan_socket_mc_leave_group(vso, &group, &source, ifidx);
	}
}

static struct vxlan_softc *
vxlan_socket_lookup_softc_locked(struct vxlan_socket *vso, uint32_t vni)
{
	struct vxlan_softc *sc;
	uint32_t hash;

	VXLAN_SO_LOCK_ASSERT(vso);
	hash = VXLAN_SO_VNI_HASH(vni);
  1136. LIST_FOREACH(sc, &vso->vxlso_vni_hash[hash], vxl_entry) {
  1137. if (sc->vxl_vni == vni) {
  1138. VXLAN_ACQUIRE(sc);
  1139. break;
  1140. }
  1141. }
  1142. return (sc);
  1143. }
  1144. static struct vxlan_softc *
  1145. vxlan_socket_lookup_softc(struct vxlan_socket *vso, uint32_t vni)
  1146. {
  1147. struct rm_priotracker tracker;
  1148. struct vxlan_softc *sc;
  1149. VXLAN_SO_RLOCK(vso, &tracker);
  1150. sc = vxlan_socket_lookup_softc_locked(vso, vni);
  1151. VXLAN_SO_RUNLOCK(vso, &tracker);
  1152. return (sc);
  1153. }
  1154. static int
  1155. vxlan_socket_insert_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
  1156. {
  1157. struct vxlan_softc *tsc;
  1158. uint32_t vni, hash;
  1159. vni = sc->vxl_vni;
  1160. hash = VXLAN_SO_VNI_HASH(vni);
  1161. VXLAN_SO_WLOCK(vso);
  1162. tsc = vxlan_socket_lookup_softc_locked(vso, vni);
  1163. if (tsc != NULL) {
  1164. VXLAN_SO_WUNLOCK(vso);
  1165. vxlan_release(tsc);
  1166. return (EEXIST);
  1167. }
  1168. VXLAN_ACQUIRE(sc);
  1169. LIST_INSERT_HEAD(&vso->vxlso_vni_hash[hash], sc, vxl_entry);
  1170. VXLAN_SO_WUNLOCK(vso);
  1171. return (0);
  1172. }
  1173. static void
  1174. vxlan_socket_remove_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
  1175. {
  1176. VXLAN_SO_WLOCK(vso);
  1177. LIST_REMOVE(sc, vxl_entry);
  1178. VXLAN_SO_WUNLOCK(vso);
  1179. vxlan_release(sc);
  1180. }
  1181. static struct ifnet *
  1182. vxlan_multicast_if_ref(struct vxlan_softc *sc, int ipv4)
  1183. {
  1184. struct ifnet *ifp;
  1185. VXLAN_LOCK_ASSERT(sc);
  1186. if (ipv4 && sc->vxl_im4o != NULL)
  1187. ifp = sc->vxl_im4o->imo_multicast_ifp;
  1188. else if (!ipv4 && sc->vxl_im6o != NULL)
  1189. ifp = sc->vxl_im6o->im6o_multicast_ifp;
  1190. else
  1191. ifp = NULL;
  1192. if (ifp != NULL)
  1193. if_ref(ifp);
  1194. return (ifp);
  1195. }
  1196. static void
  1197. vxlan_free_multicast(struct vxlan_softc *sc)
  1198. {
  1199. if (sc->vxl_mc_ifp != NULL) {
  1200. if_rele(sc->vxl_mc_ifp);
  1201. sc->vxl_mc_ifp = NULL;
  1202. sc->vxl_mc_ifindex = 0;
  1203. }
  1204. if (sc->vxl_im4o != NULL) {
  1205. free(sc->vxl_im4o, M_VXLAN);
  1206. sc->vxl_im4o = NULL;
  1207. }
  1208. if (sc->vxl_im6o != NULL) {
  1209. free(sc->vxl_im6o, M_VXLAN);
  1210. sc->vxl_im6o = NULL;
  1211. }
  1212. }
  1213. static int
  1214. vxlan_setup_multicast_interface(struct vxlan_softc *sc)
  1215. {
  1216. struct ifnet *ifp;
  1217. ifp = ifunit_ref(sc->vxl_mc_ifname);
  1218. if (ifp == NULL) {
  1219. if_printf(sc->vxl_ifp, "multicast interface %s does "
  1220. "not exist\n", sc->vxl_mc_ifname);
  1221. return (ENOENT);
  1222. }
  1223. if ((ifp->if_flags & IFF_MULTICAST) == 0) {
  1224. if_printf(sc->vxl_ifp, "interface %s does not support "
  1225. "multicast\n", sc->vxl_mc_ifname);
  1226. if_rele(ifp);
  1227. return (ENOTSUP);
  1228. }
  1229. sc->vxl_mc_ifp = ifp;
  1230. sc->vxl_mc_ifindex = ifp->if_index;
  1231. return (0);
  1232. }
  1233. static int
  1234. vxlan_setup_multicast(struct vxlan_softc *sc)
  1235. {
  1236. const union vxlan_sockaddr *group;
  1237. int error;
  1238. group = &sc->vxl_dst_addr;
  1239. error = 0;
  1240. if (sc->vxl_mc_ifname[0] != '\0') {
  1241. error = vxlan_setup_multicast_interface(sc);
  1242. if (error)
  1243. return (error);
  1244. }
  1245. /*
  1246. * Initialize an multicast options structure that is sufficiently
  1247. * populated for use in the respective IP output routine. This
  1248. * structure is typically stored in the socket, but our sockets
  1249. * may be shared among multiple interfaces.
  1250. */
  1251. if (VXLAN_SOCKADDR_IS_IPV4(group)) {
  1252. sc->vxl_im4o = malloc(sizeof(struct ip_moptions), M_VXLAN,
  1253. M_ZERO | M_WAITOK);
  1254. sc->vxl_im4o->imo_multicast_ifp = sc->vxl_mc_ifp;
  1255. sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
  1256. sc->vxl_im4o->imo_multicast_vif = -1;
  1257. } else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
  1258. sc->vxl_im6o = malloc(sizeof(struct ip6_moptions), M_VXLAN,
  1259. M_ZERO | M_WAITOK);
  1260. sc->vxl_im6o->im6o_multicast_ifp = sc->vxl_mc_ifp;
  1261. sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
  1262. }
  1263. return (error);
  1264. }
  1265. static int
  1266. vxlan_setup_socket(struct vxlan_softc *sc)
  1267. {
  1268. struct vxlan_socket *vso;
  1269. struct ifnet *ifp;
  1270. union vxlan_sockaddr *saddr, *daddr;
  1271. int multicast, error;
  1272. vso = NULL;
  1273. ifp = sc->vxl_ifp;
  1274. saddr = &sc->vxl_src_addr;
  1275. daddr = &sc->vxl_dst_addr;
  1276. multicast = vxlan_sockaddr_in_multicast(daddr);
  1277. MPASS(multicast != -1);
  1278. sc->vxl_vso_mc_index = -1;
  1279. /*
  1280. * Try to create the socket. If that fails, attempt to use an
  1281. * existing socket.
  1282. */
  1283. error = vxlan_socket_create(ifp, multicast, saddr, &vso);
  1284. if (error) {
  1285. if (multicast != 0)
  1286. vso = vxlan_socket_mc_lookup(saddr);
  1287. else
  1288. vso = vxlan_socket_lookup(saddr);
  1289. if (vso == NULL) {
  1290. if_printf(ifp, "cannot create socket (error: %d), "
  1291. "and no existing socket found\n", error);
  1292. goto out;
  1293. }
  1294. }
  1295. if (multicast != 0) {
  1296. error = vxlan_setup_multicast(sc);
  1297. if (error)
  1298. goto out;
  1299. error = vxlan_socket_mc_add_group(vso, daddr, saddr,
  1300. sc->vxl_mc_ifindex, &sc->vxl_vso_mc_index);
  1301. if (error)
  1302. goto out;
  1303. }
  1304. sc->vxl_sock = vso;
  1305. error = vxlan_socket_insert_softc(vso, sc);
  1306. if (error) {
  1307. sc->vxl_sock = NULL;
  1308. if_printf(ifp, "network identifier %d already exists in "
  1309. "this socket\n", sc->vxl_vni);
  1310. goto out;
  1311. }
  1312. return (0);
  1313. out:
  1314. if (vso != NULL) {
  1315. if (sc->vxl_vso_mc_index != -1) {
  1316. vxlan_socket_mc_release_group_by_idx(vso,
  1317. sc->vxl_vso_mc_index);
  1318. sc->vxl_vso_mc_index = -1;
  1319. }
  1320. if (multicast != 0)
  1321. vxlan_free_multicast(sc);
  1322. vxlan_socket_release(vso);
  1323. }
  1324. return (error);
  1325. }
  1326. #ifdef INET6
  1327. static void
  1328. vxlan_setup_zero_checksum_port(struct vxlan_softc *sc)
  1329. {
  1330. if (!VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_src_addr))
  1331. return;
  1332. MPASS(sc->vxl_src_addr.in6.sin6_port != 0);
  1333. MPASS(sc->vxl_dst_addr.in6.sin6_port != 0);
  1334. if (sc->vxl_src_addr.in6.sin6_port != sc->vxl_dst_addr.in6.sin6_port) {
  1335. if_printf(sc->vxl_ifp, "port %d in src address does not match "
  1336. "port %d in dst address, rfc6935_port (%d) not updated.\n",
  1337. ntohs(sc->vxl_src_addr.in6.sin6_port),
  1338. ntohs(sc->vxl_dst_addr.in6.sin6_port),
  1339. V_zero_checksum_port);
  1340. return;
  1341. }
  1342. if (V_zero_checksum_port != 0) {
  1343. if (V_zero_checksum_port !=
  1344. ntohs(sc->vxl_src_addr.in6.sin6_port)) {
  1345. if_printf(sc->vxl_ifp, "rfc6935_port is already set to "
  1346. "%d, cannot set it to %d.\n", V_zero_checksum_port,
  1347. ntohs(sc->vxl_src_addr.in6.sin6_port));
  1348. }
  1349. return;
  1350. }
  1351. V_zero_checksum_port = ntohs(sc->vxl_src_addr.in6.sin6_port);
  1352. if_printf(sc->vxl_ifp, "rfc6935_port set to %d\n",
  1353. V_zero_checksum_port);
  1354. }
  1355. #endif
  1356. static void
  1357. vxlan_setup_interface_hdrlen(struct vxlan_softc *sc)
  1358. {
  1359. struct ifnet *ifp;
  1360. VXLAN_LOCK_WASSERT(sc);
  1361. ifp = sc->vxl_ifp;
  1362. ifp->if_hdrlen = ETHER_HDR_LEN + sizeof(struct vxlanudphdr);
  1363. if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr) != 0)
  1364. ifp->if_hdrlen += sizeof(struct ip);
  1365. else if (VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_dst_addr) != 0)
  1366. ifp->if_hdrlen += sizeof(struct ip6_hdr);
  1367. if ((sc->vxl_flags & VXLAN_FLAG_USER_MTU) == 0)
  1368. ifp->if_mtu = ETHERMTU - ifp->if_hdrlen;
  1369. }
  1370. static int
  1371. vxlan_valid_init_config(struct vxlan_softc *sc)
  1372. {
  1373. const char *reason;
  1374. if (vxlan_check_vni(sc->vxl_vni) != 0) {
  1375. reason = "invalid virtual network identifier specified";
  1376. goto fail;
  1377. }
  1378. if (vxlan_sockaddr_supported(&sc->vxl_src_addr, 1) == 0) {
  1379. reason = "source address type is not supported";
  1380. goto fail;
  1381. }
  1382. if (vxlan_sockaddr_supported(&sc->vxl_dst_addr, 0) == 0) {
  1383. reason = "destination address type is not supported";
  1384. goto fail;
  1385. }
  1386. if (vxlan_sockaddr_in_any(&sc->vxl_dst_addr) != 0) {
  1387. reason = "no valid destination address specified";
  1388. goto fail;
  1389. }
  1390. if (vxlan_sockaddr_in_multicast(&sc->vxl_dst_addr) == 0 &&
  1391. sc->vxl_mc_ifname[0] != '\0') {
  1392. reason = "can only specify interface with a group address";
  1393. goto fail;
  1394. }
  1395. if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
  1396. if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_src_addr) ^
  1397. VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr)) {
  1398. reason = "source and destination address must both "
  1399. "be either IPv4 or IPv6";
  1400. goto fail;
  1401. }
  1402. }
  1403. if (sc->vxl_src_addr.in4.sin_port == 0) {
  1404. reason = "local port not specified";
  1405. goto fail;
  1406. }
  1407. if (sc->vxl_dst_addr.in4.sin_port == 0) {
  1408. reason = "remote port not specified";
  1409. goto fail;
  1410. }
  1411. return (0);
  1412. fail:
  1413. if_printf(sc->vxl_ifp, "cannot initialize interface: %s\n", reason);
  1414. return (EINVAL);
  1415. }
  1416. static void
  1417. vxlan_init_wait(struct vxlan_softc *sc)
  1418. {
  1419. VXLAN_LOCK_WASSERT(sc);
  1420. while (sc->vxl_flags & VXLAN_FLAG_INIT)
  1421. rm_sleep(sc, &sc->vxl_lock, 0, "vxlint", hz);
  1422. }
  1423. static void
  1424. vxlan_init_complete(struct vxlan_softc *sc)
  1425. {
  1426. VXLAN_WLOCK(sc);
  1427. sc->vxl_flags &= ~VXLAN_FLAG_INIT;
  1428. wakeup(sc);
  1429. VXLAN_WUNLOCK(sc);
  1430. }
  1431. static void
  1432. vxlan_init(void *xsc)
  1433. {
  1434. static const uint8_t empty_mac[ETHER_ADDR_LEN];
  1435. struct vxlan_softc *sc;
  1436. struct ifnet *ifp;
  1437. sc = xsc;
  1438. ifp = sc->vxl_ifp;
  1439. sx_xlock(&vxlan_sx);
  1440. VXLAN_WLOCK(sc);
  1441. if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  1442. VXLAN_WUNLOCK(sc);
  1443. sx_xunlock(&vxlan_sx);
  1444. return;
  1445. }
  1446. sc->vxl_flags |= VXLAN_FLAG_INIT;
  1447. VXLAN_WUNLOCK(sc);
  1448. if (vxlan_valid_init_config(sc) != 0)
  1449. goto out;
  1450. if (vxlan_setup_socket(sc) != 0)
  1451. goto out;
  1452. #ifdef INET6
  1453. vxlan_setup_zero_checksum_port(sc);
  1454. #endif
  1455. /* Initialize the default forwarding entry. */
  1456. vxlan_ftable_entry_init(sc, &sc->vxl_default_fe, empty_mac,
  1457. &sc->vxl_dst_addr.sa, VXLAN_FE_FLAG_STATIC);
  1458. VXLAN_WLOCK(sc);
  1459. ifp->if_drv_flags |= IFF_DRV_RUNNING;
  1460. callout_reset(&sc->vxl_callout, vxlan_ftable_prune_period * hz,
  1461. vxlan_timer, sc);
  1462. VXLAN_WUNLOCK(sc);
  1463. if_link_state_change(ifp, LINK_STATE_UP);
  1464. EVENTHANDLER_INVOKE(vxlan_start, ifp, sc->vxl_src_addr.in4.sin_family,
  1465. ntohs(sc->vxl_src_addr.in4.sin_port));
  1466. out:
  1467. vxlan_init_complete(sc);
  1468. sx_xunlock(&vxlan_sx);
  1469. }
  1470. static void
  1471. vxlan_release(struct vxlan_softc *sc)
  1472. {
  1473. /*
  1474. * The softc may be destroyed as soon as we release our reference,
  1475. * so we cannot serialize the wakeup with the softc lock. We use a
  1476. * timeout in our sleeps so a missed wakeup is unfortunate but not
  1477. * fatal.
  1478. */
  1479. if (VXLAN_RELEASE(sc) != 0)
  1480. wakeup(sc);
  1481. }
  1482. static void
  1483. vxlan_teardown_wait(struct vxlan_softc *sc)
  1484. {
  1485. VXLAN_LOCK_WASSERT(sc);
  1486. while (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
  1487. rm_sleep(sc, &sc->vxl_lock, 0, "vxltrn", hz);
  1488. }
  1489. static void
  1490. vxlan_teardown_complete(struct vxlan_softc *sc)
  1491. {
  1492. VXLAN_WLOCK(sc);
  1493. sc->vxl_flags &= ~VXLAN_FLAG_TEARDOWN;
  1494. wakeup(sc);
  1495. VXLAN_WUNLOCK(sc);
  1496. }
  1497. static void
  1498. vxlan_teardown_locked(struct vxlan_softc *sc)
  1499. {
  1500. struct ifnet *ifp;
  1501. struct vxlan_socket *vso;
  1502. sx_assert(&vxlan_sx, SA_XLOCKED);
  1503. VXLAN_LOCK_WASSERT(sc);
  1504. MPASS(sc->vxl_flags & VXLAN_FLAG_TEARDOWN);
  1505. ifp = sc->vxl_ifp;
  1506. ifp->if_flags &= ~IFF_UP;
  1507. ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  1508. callout_stop(&sc->vxl_callout);
  1509. vso = sc->vxl_sock;
  1510. sc->vxl_sock = NULL;
  1511. VXLAN_WUNLOCK(sc);
  1512. if_link_state_change(ifp, LINK_STATE_DOWN);
  1513. EVENTHANDLER_INVOKE(vxlan_stop, ifp, sc->vxl_src_addr.in4.sin_family,
  1514. ntohs(sc->vxl_src_addr.in4.sin_port));
  1515. if (vso != NULL) {
  1516. vxlan_socket_remove_softc(vso, sc);
  1517. if (sc->vxl_vso_mc_index != -1) {
  1518. vxlan_socket_mc_release_group_by_idx(vso,
  1519. sc->vxl_vso_mc_index);
  1520. sc->vxl_vso_mc_index = -1;
  1521. }
  1522. }
  1523. VXLAN_WLOCK(sc);
  1524. while (sc->vxl_refcnt != 0)
  1525. rm_sleep(sc, &sc->vxl_lock, 0, "vxldrn", hz);
  1526. VXLAN_WUNLOCK(sc);
  1527. callout_drain(&sc->vxl_callout);
  1528. vxlan_free_multicast(sc);
  1529. if (vso != NULL)
  1530. vxlan_socket_release(vso);
  1531. vxlan_teardown_complete(sc);
  1532. }
  1533. static void
  1534. vxlan_teardown(struct vxlan_softc *sc)
  1535. {
  1536. sx_xlock(&vxlan_sx);
  1537. VXLAN_WLOCK(sc);
  1538. if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN) {
  1539. vxlan_teardown_wait(sc);
  1540. VXLAN_WUNLOCK(sc);
  1541. sx_xunlock(&vxlan_sx);
  1542. return;
  1543. }
  1544. sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
  1545. vxlan_teardown_locked(sc);
  1546. sx_xunlock(&vxlan_sx);
  1547. }
  1548. static void
  1549. vxlan_ifdetach(struct vxlan_softc *sc, struct ifnet *ifp,
  1550. struct vxlan_softc_head *list)
  1551. {
  1552. VXLAN_WLOCK(sc);
  1553. if (sc->vxl_mc_ifp != ifp)
  1554. goto out;
  1555. if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
  1556. goto out;
  1557. sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
  1558. LIST_INSERT_HEAD(list, sc, vxl_ifdetach_list);
  1559. out:
  1560. VXLAN_WUNLOCK(sc);
  1561. }
  1562. static void
  1563. vxlan_timer(void *xsc)
  1564. {
  1565. struct vxlan_softc *sc;
  1566. sc = xsc;
  1567. VXLAN_LOCK_WASSERT(sc);
  1568. vxlan_ftable_expire(sc);
  1569. callout_schedule(&sc->vxl_callout, vxlan_ftable_prune_period * hz);
  1570. }
  1571. static int
  1572. vxlan_ioctl_ifflags(struct vxlan_softc *sc)
  1573. {
  1574. struct ifnet *ifp;
  1575. ifp = sc->vxl_ifp;
  1576. if (ifp->if_flags & IFF_UP) {
  1577. if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
  1578. vxlan_init(sc);
  1579. } else {
  1580. if (ifp->if_drv_flags & IFF_DRV_RUNNING)
  1581. vxlan_teardown(sc);
  1582. }
  1583. return (0);
  1584. }
  1585. static int
  1586. vxlan_ctrl_get_config(struct vxlan_softc *sc, void *arg)
  1587. {
  1588. struct rm_priotracker tracker;
  1589. struct ifvxlancfg *cfg;
  1590. cfg = arg;
  1591. bzero(cfg, sizeof(*cfg));
  1592. VXLAN_RLOCK(sc, &tracker);
  1593. cfg->vxlc_vni = sc->vxl_vni;
  1594. memcpy(&cfg->vxlc_local_sa, &sc->vxl_src_addr,
  1595. sizeof(union vxlan_sockaddr));
  1596. memcpy(&cfg->vxlc_remote_sa, &sc->vxl_dst_addr,
  1597. sizeof(union vxlan_sockaddr));
  1598. cfg->vxlc_mc_ifindex = sc->vxl_mc_ifindex;
  1599. cfg->vxlc_ftable_cnt = sc->vxl_ftable_cnt;
  1600. cfg->vxlc_ftable_max = sc->vxl_ftable_max;
  1601. cfg->vxlc_ftable_timeout = sc->vxl_ftable_timeout;
  1602. cfg->vxlc_port_min = sc->vxl_min_port;
  1603. cfg->vxlc_port_max = sc->vxl_max_port;
  1604. cfg->vxlc_learn = (sc->vxl_flags & VXLAN_FLAG_LEARN) != 0;
  1605. cfg->vxlc_ttl = sc->vxl_ttl;
  1606. VXLAN_RUNLOCK(sc, &tracker);
  1607. #ifdef INET6
  1608. if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_local_sa))
  1609. sa6_recoverscope(&cfg->vxlc_local_sa.in6);
  1610. if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_remote_sa))
  1611. sa6_recoverscope(&cfg->vxlc_remote_sa.in6);
  1612. #endif
  1613. return (0);
  1614. }
  1615. static int
  1616. vxlan_ctrl_set_vni(struct vxlan_softc *sc, void *arg)
  1617. {
  1618. struct ifvxlancmd *cmd;
  1619. int error;
  1620. cmd = arg;
  1621. if (vxlan_check_vni(cmd->vxlcmd_vni) != 0)
  1622. return (EINVAL);
  1623. VXLAN_WLOCK(sc);
  1624. if (vxlan_can_change_config(sc)) {
  1625. sc->vxl_vni = cmd->vxlcmd_vni;
  1626. error = 0;
  1627. } else
  1628. error = EBUSY;
  1629. VXLAN_WUNLOCK(sc);
  1630. return (error);
  1631. }
  1632. static int
  1633. vxlan_ctrl_set_local_addr(struct vxlan_softc *sc, void *arg)
  1634. {
  1635. struct ifvxlancmd *cmd;
  1636. union vxlan_sockaddr *vxlsa;
  1637. int error;
  1638. cmd = arg;
  1639. vxlsa = &cmd->vxlcmd_sa;
  1640. if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
  1641. return (EINVAL);
  1642. if (vxlan_sockaddr_in_multicast(vxlsa) != 0)
  1643. return (EINVAL);
  1644. if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
  1645. error = vxlan_sockaddr_in6_embedscope(vxlsa);
  1646. if (error)
  1647. return (error);
  1648. }
  1649. VXLAN_WLOCK(sc);
  1650. if (vxlan_can_change_config(sc)) {
  1651. vxlan_sockaddr_in_copy(&sc->vxl_src_addr, &vxlsa->sa);
  1652. vxlan_set_hwcaps(sc);
  1653. error = 0;
  1654. } else
  1655. error = EBUSY;
  1656. VXLAN_WUNLOCK(sc);
  1657. return (error);
  1658. }
  1659. static int
  1660. vxlan_ctrl_set_remote_addr(struct vxlan_softc *sc, void *arg)
  1661. {
  1662. struct ifvxlancmd *cmd;
  1663. union vxlan_sockaddr *vxlsa;
  1664. int error;
  1665. cmd = arg;
  1666. vxlsa = &cmd->vxlcmd_sa;
  1667. if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
  1668. return (EINVAL);
  1669. if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
  1670. error = vxlan_sockaddr_in6_embedscope(vxlsa);
  1671. if (error)
  1672. return (error);
  1673. }
  1674. VXLAN_WLOCK(sc);
  1675. if (vxlan_can_change_config(sc)) {
  1676. vxlan_sockaddr_in_copy(&sc->vxl_dst_addr, &vxlsa->sa);
  1677. vxlan_setup_interface_hdrlen(sc);
  1678. error = 0;
  1679. } else
  1680. error = EBUSY;
  1681. VXLAN_WUNLOCK(sc);
  1682. return (error);
  1683. }
  1684. static int
  1685. vxlan_ctrl_set_local_port(struct vxlan_softc *sc, void *arg)
  1686. {
  1687. struct ifvxlancmd *cmd;
  1688. int error;
  1689. cmd = arg;
  1690. if (cmd->vxlcmd_port == 0)
  1691. return (EINVAL);
  1692. VXLAN_WLOCK(sc);
  1693. if (vxlan_can_change_config(sc)) {
  1694. sc->vxl_src_addr.in4.sin_port = htons(cmd->vxlcmd_port);
  1695. error = 0;
  1696. } else
  1697. error = EBUSY;
  1698. VXLAN_WUNLOCK(sc);
  1699. return (error);
  1700. }
  1701. static int
  1702. vxlan_ctrl_set_remote_port(struct vxlan_softc *sc, void *arg)
  1703. {
  1704. struct ifvxlancmd *cmd;
  1705. int error;
  1706. cmd = arg;
  1707. if (cmd->vxlcmd_port == 0)
  1708. return (EINVAL);
  1709. VXLAN_WLOCK(sc);
  1710. if (vxlan_can_change_config(sc)) {
  1711. sc->vxl_dst_addr.in4.sin_port = htons(cmd->vxlcmd_port);
  1712. error = 0;
  1713. } else
  1714. error = EBUSY;
  1715. VXLAN_WUNLOCK(sc);
  1716. return (error);
  1717. }
  1718. static int
  1719. vxlan_ctrl_set_port_range(struct vxlan_softc *sc, void *arg)
  1720. {
  1721. struct ifvxlancmd *cmd;
  1722. uint16_t min, max;
  1723. int error;
  1724. cmd = arg;
  1725. min = cmd->vxlcmd_port_min;
  1726. max = cmd->vxlcmd_port_max;
  1727. if (max < min)
  1728. return (EINVAL);
  1729. VXLAN_WLOCK(sc);
  1730. if (vxlan_can_change_config(sc)) {
  1731. sc->vxl_min_port = min;
  1732. sc->vxl_max_port = max;
  1733. error = 0;
  1734. } else
  1735. error = EBUSY;
  1736. VXLAN_WUNLOCK(sc);
  1737. return (error);
  1738. }
  1739. static int
  1740. vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *sc, void *arg)
  1741. {
  1742. struct ifvxlancmd *cmd;
  1743. int error;
  1744. cmd = arg;
  1745. VXLAN_WLOCK(sc);
  1746. if (vxlan_check_ftable_timeout(cmd->vxlcmd_ftable_timeout) == 0) {
  1747. sc->vxl_ftable_timeout = cmd->vxlcmd_ftable_timeout;
  1748. error = 0;
  1749. } else
  1750. error = EINVAL;
  1751. VXLAN_WUNLOCK(sc);
  1752. return (error);
  1753. }
  1754. static int
  1755. vxlan_ctrl_set_ftable_max(struct vxlan_softc *sc, void *arg)
  1756. {
  1757. struct ifvxlancmd *cmd;
  1758. int error;
  1759. cmd = arg;
  1760. VXLAN_WLOCK(sc);
  1761. if (vxlan_check_ftable_max(cmd->vxlcmd_ftable_max) == 0) {
  1762. sc->vxl_ftable_max = cmd->vxlcmd_ftable_max;
  1763. error = 0;
  1764. } else
  1765. error = EINVAL;
  1766. VXLAN_WUNLOCK(sc);
  1767. return (error);
  1768. }
  1769. static int
  1770. vxlan_ctrl_set_multicast_if(struct vxlan_softc * sc, void *arg)
  1771. {
  1772. struct ifvxlancmd *cmd;
  1773. int error;
  1774. cmd = arg;
  1775. VXLAN_WLOCK(sc);
  1776. if (vxlan_can_change_config(sc)) {
  1777. strlcpy(sc->vxl_mc_ifname, cmd->vxlcmd_ifname, IFNAMSIZ);
  1778. vxlan_set_hwcaps(sc);
  1779. error = 0;
  1780. } else
  1781. error = EBUSY;
  1782. VXLAN_WUNLOCK(sc);
  1783. return (error);
  1784. }
  1785. static int
  1786. vxlan_ctrl_set_ttl(struct vxlan_softc *sc, void *arg)
  1787. {
  1788. struct ifvxlancmd *cmd;
  1789. int error;
  1790. cmd = arg;
  1791. VXLAN_WLOCK(sc);
  1792. if (vxlan_check_ttl(cmd->vxlcmd_ttl) == 0) {
  1793. sc->vxl_ttl = cmd->vxlcmd_ttl;
  1794. if (sc->vxl_im4o != NULL)
  1795. sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
  1796. if (sc->vxl_im6o != NULL)
  1797. sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
  1798. error = 0;
  1799. } else
  1800. error = EINVAL;
  1801. VXLAN_WUNLOCK(sc);
  1802. return (error);
  1803. }
  1804. static int
  1805. vxlan_ctrl_set_learn(struct vxlan_softc *sc, void *arg)
  1806. {
  1807. struct ifvxlancmd *cmd;
  1808. cmd = arg;
  1809. VXLAN_WLOCK(sc);
  1810. if (cmd->vxlcmd_flags & VXLAN_CMD_FLAG_LEARN)
  1811. sc->vxl_flags |= VXLAN_FLAG_LEARN;
  1812. else
  1813. sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
  1814. VXLAN_WUNLOCK(sc);
  1815. return (0);
  1816. }
  1817. static int
  1818. vxlan_ctrl_ftable_entry_add(struct vxlan_softc *sc, void *arg)
  1819. {
  1820. union vxlan_sockaddr vxlsa;
  1821. struct ifvxlancmd *cmd;
  1822. struct vxlan_ftable_entry *fe;
  1823. int error;
  1824. cmd = arg;
  1825. vxlsa = cmd->vxlcmd_sa;
  1826. if (!VXLAN_SOCKADDR_IS_IPV46(&vxlsa))
  1827. return (EINVAL);
  1828. if (vxlan_sockaddr_in_any(&vxlsa) != 0)
  1829. return (EINVAL);
  1830. if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
  1831. return (EINVAL);
  1832. /* BMV: We could support both IPv4 and IPv6 later. */
  1833. if (vxlsa.sa.sa_family != sc->vxl_dst_addr.sa.sa_family)
  1834. return (EAFNOSUPPORT);
  1835. if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
  1836. error = vxlan_sockaddr_in6_embedscope(&vxlsa);
  1837. if (error)
  1838. return (error);
  1839. }
  1840. fe = vxlan_ftable_entry_alloc();
  1841. if (fe == NULL)
  1842. return (ENOMEM);
  1843. if (vxlsa.in4.sin_port == 0)
  1844. vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
  1845. vxlan_ftable_entry_init(sc, fe, cmd->vxlcmd_mac, &vxlsa.sa,
  1846. VXLAN_FE_FLAG_STATIC);
  1847. VXLAN_WLOCK(sc);
  1848. error = vxlan_ftable_entry_insert(sc, fe);
  1849. VXLAN_WUNLOCK(sc);
  1850. if (error)
  1851. vxlan_ftable_entry_free(fe);
  1852. return (error);
  1853. }
  1854. static int
  1855. vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *sc, void *arg)
  1856. {
  1857. struct ifvxlancmd *cmd;
  1858. struct vxlan_ftable_entry *fe;
  1859. int error;
  1860. cmd = arg;
  1861. VXLAN_WLOCK(sc);
  1862. fe = vxlan_ftable_entry_lookup(sc, cmd->vxlcmd_mac);
  1863. if (fe != NULL) {
  1864. vxlan_ftable_entry_destroy(sc, fe);
  1865. error = 0;
  1866. } else
  1867. error = ENOENT;
  1868. VXLAN_WUNLOCK(sc);
  1869. return (error);
  1870. }
  1871. static int
  1872. vxlan_ctrl_flush(struct vxlan_softc *sc, void *arg)
  1873. {
  1874. struct ifvxlancmd *cmd;
  1875. int all;
  1876. cmd = arg;
  1877. all = cmd->vxlcmd_flags & VXLAN_CMD_FLAG_FLUSH_ALL;
  1878. VXLAN_WLOCK(sc);
  1879. vxlan_ftable_flush(sc, all);
  1880. VXLAN_WUNLOCK(sc);
  1881. return (0);
  1882. }
  1883. static int
  1884. vxlan_ioctl_drvspec(struct vxlan_softc *sc, struct ifdrv *ifd, int get)
  1885. {
  1886. const struct vxlan_control *vc;
  1887. union {
  1888. struct ifvxlancfg cfg;
  1889. struct ifvxlancmd cmd;
  1890. } args;
  1891. int out, error;
  1892. if (ifd->ifd_cmd >= vxlan_control_table_size)
  1893. return (EINVAL);
  1894. bzero(&args, sizeof(args));
  1895. vc = &vxlan_control_table[ifd->ifd_cmd];
  1896. out = (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) != 0;
  1897. if ((get != 0 && out == 0) || (get == 0 && out != 0))
  1898. return (EINVAL);
  1899. if (vc->vxlc_flags & VXLAN_CTRL_FLAG_SUSER) {
  1900. error = priv_check(curthread, PRIV_NET_VXLAN);
  1901. if (error)
  1902. return (error);
  1903. }
  1904. if (ifd->ifd_len != vc->vxlc_argsize ||
  1905. ifd->ifd_len > sizeof(args))
  1906. return (EINVAL);
  1907. if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYIN) {
  1908. error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
  1909. if (error)
  1910. return (error);
  1911. }
  1912. error = vc->vxlc_func(sc, &args);
  1913. if (error)
  1914. return (error);
  1915. if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) {
  1916. error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
  1917. if (error)
  1918. return (error);
  1919. }
  1920. return (0);
  1921. }
  1922. static int
  1923. vxlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  1924. {
  1925. struct rm_priotracker tracker;
  1926. struct vxlan_softc *sc;
  1927. struct ifreq *ifr;
  1928. struct ifdrv *ifd;
  1929. int error;
  1930. sc = ifp->if_softc;
  1931. ifr = (struct ifreq *) data;
  1932. ifd = (struct ifdrv *) data;
  1933. error = 0;
  1934. switch (cmd) {
  1935. case SIOCADDMULTI:
  1936. case SIOCDELMULTI:
  1937. break;
  1938. case SIOCGDRVSPEC:
  1939. case SIOCSDRVSPEC:
  1940. error = vxlan_ioctl_drvspec(sc, ifd, cmd == SIOCGDRVSPEC);
  1941. break;
  1942. case SIOCSIFFLAGS:
  1943. error = vxlan_ioctl_ifflags(sc);
  1944. break;
  1945. case SIOCSIFMEDIA:
  1946. case SIOCGIFMEDIA:
  1947. error = ifmedia_ioctl(ifp, ifr, &sc->vxl_media, cmd);
  1948. break;
  1949. case SIOCSIFMTU:
  1950. if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VXLAN_MAX_MTU) {
  1951. error = EINVAL;
  1952. } else {
  1953. VXLAN_WLOCK(sc);
  1954. ifp->if_mtu = ifr->ifr_mtu;
  1955. sc->vxl_flags |= VXLAN_FLAG_USER_MTU;
  1956. VXLAN_WUNLOCK(sc);
  1957. }
  1958. break;
  1959. case SIOCSIFCAP:
  1960. VXLAN_WLOCK(sc);
  1961. error = vxlan_set_reqcap(sc, ifp, ifr->ifr_reqcap);
  1962. if (error == 0)
  1963. vxlan_set_hwcaps(sc);
  1964. VXLAN_WUNLOCK(sc);
  1965. break;
  1966. case SIOCGTUNFIB:
  1967. VXLAN_RLOCK(sc, &tracker);
  1968. ifr->ifr_fib = sc->vxl_fibnum;
  1969. VXLAN_RUNLOCK(sc, &tracker);
  1970. break;
  1971. case SIOCSTUNFIB:
  1972. if ((error = priv_check(curthread, PRIV_NET_VXLAN)) != 0)
  1973. break;
  1974. if (ifr->ifr_fib >= rt_numfibs)
  1975. error = EINVAL;
  1976. else {
  1977. VXLAN_WLOCK(sc);
  1978. sc->vxl_fibnum = ifr->ifr_fib;
  1979. VXLAN_WUNLOCK(sc);
  1980. }
  1981. break;
  1982. default:
  1983. error = ether_ioctl(ifp, cmd, data);
  1984. break;
  1985. }
  1986. return (error);
  1987. }
  1988. #if defined(INET) || defined(INET6)
  1989. static uint16_t
  1990. vxlan_pick_source_port(struct vxlan_softc *sc, struct mbuf *m)
  1991. {
  1992. int range;
  1993. uint32_t hash;
  1994. range = sc->vxl_max_port - sc->vxl_min_port + 1;
  1995. if (M_HASHTYPE_ISHASH(m))
  1996. hash = m->m_pkthdr.flowid;
  1997. else
  1998. hash = jenkins_hash(m->m_data, ETHER_HDR_LEN,
  1999. sc->vxl_port_hash_key);
  2000. return (sc->vxl_min_port + (hash % range));
  2001. }
  2002. static void
  2003. vxlan_encap_header(struct vxlan_softc *sc, struct mbuf *m, int ipoff,
  2004. uint16_t srcport, uint16_t dstport)
  2005. {
  2006. struct vxlanudphdr *hdr;
  2007. struct udphdr *udph;
  2008. struct vxlan_header *vxh;
  2009. int len;
  2010. len = m->m_pkthdr.len - ipoff;
  2011. MPASS(len >= sizeof(struct vxlanudphdr));
  2012. hdr = mtodo(m, ipoff);
  2013. udph = &hdr->vxlh_udp;
  2014. udph->uh_sport = srcport;
  2015. udph->uh_dport = dstport;
  2016. udph->uh_ulen = htons(len);
  2017. udph->uh_sum = 0;
  2018. vxh = &hdr->vxlh_hdr;
  2019. vxh->vxlh_flags = htonl(VXLAN_HDR_FLAGS_VALID_VNI);
  2020. vxh->vxlh_vni = htonl(sc->vxl_vni << VXLAN_HDR_VNI_SHIFT);
  2021. }
  2022. #endif
  2023. #if defined(INET6) || defined(INET)
  2024. /*
  2025. * Return the CSUM_INNER_* equivalent of CSUM_* caps.
  2026. */
  2027. static uint32_t
  2028. csum_flags_to_inner_flags(uint32_t csum_flags_in, const uint32_t encap)
  2029. {
  2030. uint32_t csum_flags = encap;
  2031. const uint32_t v4 = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP;
  2032. /*
  2033. * csum_flags can request either v4 or v6 offload but not both.
  2034. * tcp_output always sets CSUM_TSO (both CSUM_IP_TSO and CSUM_IP6_TSO)
  2035. * so those bits are no good to detect the IP version. Other bits are
  2036. * always set with CSUM_TSO and we use those to figure out the IP
  2037. * version.
  2038. */
  2039. if (csum_flags_in & v4) {
  2040. if (csum_flags_in & CSUM_IP)
  2041. csum_flags |= CSUM_INNER_IP;
  2042. if (csum_flags_in & CSUM_IP_UDP)
  2043. csum_flags |= CSUM_INNER_IP_UDP;
  2044. if (csum_flags_in & CSUM_IP_TCP)
  2045. csum_flags |= CSUM_INNER_IP_TCP;
  2046. if (csum_flags_in & CSUM_IP_TSO)
  2047. csum_flags |= CSUM_INNER_IP_TSO;
  2048. } else {
  2049. #ifdef INVARIANTS
  2050. const uint32_t v6 = CSUM_IP6_UDP | CSUM_IP6_TCP;
  2051. MPASS((csum_flags_in & v6) != 0);
  2052. #endif
  2053. if (csum_flags_in & CSUM_IP6_UDP)
  2054. csum_flags |= CSUM_INNER_IP6_UDP;
  2055. if (csum_flags_in & CSUM_IP6_TCP)
  2056. csum_flags |= CSUM_INNER_IP6_TCP;
  2057. if (csum_flags_in & CSUM_IP6_TSO)
  2058. csum_flags |= CSUM_INNER_IP6_TSO;
  2059. }
  2060. return (csum_flags);
  2061. }
  2062. #endif
  2063. static int
  2064. vxlan_encap4(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
  2065. struct mbuf *m)
  2066. {
  2067. #ifdef INET
  2068. struct ifnet *ifp;
  2069. struct ip *ip;
  2070. struct in_addr srcaddr, dstaddr;
  2071. uint16_t srcport, dstport;
  2072. int plen, mcast, error;
  2073. struct route route, *ro;
  2074. struct sockaddr_in *sin;
  2075. uint32_t csum_flags;
  2076. NET_EPOCH_ASSERT();
  2077. ifp = sc->vxl_ifp;
  2078. srcaddr = sc->vxl_src_addr.in4.sin_addr;
  2079. srcport = vxlan_pick_source_port(sc, m);
  2080. dstaddr = fvxlsa->in4.sin_addr;
  2081. dstport = fvxlsa->in4.sin_port;
  2082. plen = m->m_pkthdr.len;
  2083. M_PREPEND(m, sizeof(struct ip) + sizeof(struct vxlanudphdr),
  2084. M_NOWAIT);
  2085. if (m == NULL) {
  2086. if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
  2087. return (ENOBUFS);
  2088. }
  2089. ip = mtod(m, struct ip *);
  2090. ip->ip_tos = 0;
  2091. ip->ip_len = htons(m->m_pkthdr.len);
  2092. ip->ip_off = 0;
  2093. ip->ip_ttl = sc->vxl_ttl;
  2094. ip->ip_p = IPPROTO_UDP;
  2095. ip->ip_sum = 0;
  2096. ip->ip_src = srcaddr;
  2097. ip->ip_dst = dstaddr;
  2098. vxlan_encap_header(sc, m, sizeof(struct ip), srcport, dstport);
  2099. mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
  2100. m->m_flags &= ~(M_MCAST | M_BCAST);
  2101. m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
  2102. if (m->m_pkthdr.csum_flags != 0) {
  2103. /*
  2104. * HW checksum (L3 and/or L4) or TSO has been requested. Look
  2105. * up the ifnet for the outbound route and verify that the
  2106. * outbound ifnet can perform the requested operation on the
  2107. * inner frame.
  2108. */
  2109. bzero(&route, sizeof(route));
  2110. ro = &route;
  2111. sin = (struct sockaddr_in *)&ro->ro_dst;
  2112. sin->sin_family = AF_INET;
  2113. sin->sin_len = sizeof(*sin);
  2114. sin->sin_addr = ip->ip_dst;
  2115. ro->ro_nh = fib4_lookup(M_GETFIB(m), ip->ip_dst, 0, NHR_NONE,
  2116. 0);
  2117. if (ro->ro_nh == NULL) {
  2118. m_freem(m);
  2119. if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
  2120. return (EHOSTUNREACH);
  2121. }
  2122. csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
  2123. CSUM_ENCAP_VXLAN);
  2124. if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
  2125. csum_flags) {
  2126. if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
  2127. const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
  2128. if_printf(ifp, "interface %s is missing hwcaps "
  2129. "0x%08x, csum_flags 0x%08x -> 0x%08x, "
  2130. "hwassist 0x%08x\n", nh_ifp->if_xname,
  2131. csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
  2132. m->m_pkthdr.csum_flags, csum_flags,
  2133. (uint32_t)nh_ifp->if_hwassist);
  2134. }
  2135. m_freem(m);
  2136. if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
  2137. return (ENXIO);
  2138. }
  2139. m->m_pkthdr.csum_flags = csum_flags;
  2140. if (csum_flags &
  2141. (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
  2142. CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
  2143. counter_u64_add(sc->vxl_stats.txcsum, 1);
  2144. if (csum_flags & CSUM_INNER_TSO)
  2145. counter_u64_add(sc->vxl_stats.tso, 1);
  2146. }
  2147. } else
  2148. ro = NULL;
  2149. error = ip_output(m, NULL, ro, 0, sc->vxl_im4o, NULL);
  2150. if (error == 0) {
  2151. if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
  2152. if_inc_counter(ifp, IFCOUNTER_OBYTES, plen);
  2153. if (mcast != 0)
  2154. if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
  2155. } else
  2156. if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
  2157. return (error);
  2158. #else
  2159. m_freem(m);
  2160. return (ENOTSUP);
  2161. #endif
  2162. }
  2163. static int
  2164. vxlan_encap6(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
  2165. struct mbuf *m)
  2166. {
  2167. #ifdef INET6
  2168. struct ifnet *ifp;
  2169. struct ip6_hdr *ip6;
  2170. const struct in6_addr *srcaddr, *dstaddr;
  2171. uint16_t srcport, dstport;
  2172. int plen, mcast, error;
  2173. struct route_in6 route, *ro;
  2174. struct sockaddr_in6 *sin6;
  2175. uint32_t csum_flags;
  2176. NET_EPOCH_ASSERT();
  2177. ifp = sc->vxl_ifp;
  2178. srcaddr = &sc->vxl_src_addr.in6.sin6_addr;
  2179. srcport = vxlan_pick_source_port(sc, m);
  2180. dstaddr = &fvxlsa->in6.sin6_addr;
  2181. dstport = fvxlsa->in6.sin6_port;
  2182. plen = m->m_pkthdr.len;
  2183. M_PREPEND(m, sizeof(struct ip6_hdr) + sizeof(struct vxlanudphdr),
  2184. M_NOWAIT);
  2185. if (m == NULL) {
  2186. if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
  2187. return (ENOBUFS);
  2188. }
  2189. ip6 = mtod(m, struct ip6_hdr *);
  2190. ip6->ip6_flow = 0; /* BMV: Keep in forwarding entry? */
  2191. ip6->ip6_vfc = IPV6_VERSION;
  2192. ip6->ip6_plen = 0;
  2193. ip6->ip6_nxt = IPPROTO_UDP;
  2194. ip6->ip6_hlim = sc->vxl_ttl;
  2195. ip6->ip6_src = *srcaddr;
  2196. ip6->ip6_dst = *dstaddr;
  2197. vxlan_encap_header(sc, m, sizeof(struct ip6_hdr), srcport, dstport);
  2198. mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
  2199. m->m_flags &= ~(M_MCAST | M_BCAST);
  2200. ro = NULL;
  2201. m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
  2202. if (m->m_pkthdr.csum_flags != 0) {
  2203. /*
  2204. * HW checksum (L3 and/or L4) or TSO has been requested. Look
  2205. * up the ifnet for the outbound route and verify that the
  2206. * outbound ifnet can perform the requested operation on the
  2207. * inner frame.
  2208. */
  2209. bzero(&route, sizeof(route));
  2210. ro = &route;
  2211. sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
  2212. sin6->sin6_family = AF_INET6;
  2213. sin6->sin6_len = sizeof(*sin6);
  2214. sin6->sin6_addr = ip6->ip6_dst;
  2215. ro->ro_nh = fib6_lookup(M_GETFIB(m), &ip6->ip6_dst, 0,
  2216. NHR_NONE, 0);
  2217. if (ro->ro_nh == NULL) {
  2218. m_freem(m);
  2219. if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
  2220. return (EHOSTUNREACH);
  2221. }
  2222. csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
  2223. CSUM_ENCAP_VXLAN);
  2224. if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
  2225. csum_flags) {
  2226. if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
  2227. const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
  2228. if_printf(ifp, "interface %s is missing hwcaps "
  2229. "0x%08x, csum_flags 0x%08x -> 0x%08x, "
  2230. "hwassist 0x%08x\n", nh_ifp->if_xname,
  2231. csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
  2232. m->m_pkthdr.csum_flags, csum_flags,
  2233. (uint32_t)nh_ifp->if_hwassist);
  2234. }
  2235. m_freem(m);
  2236. if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
  2237. return (ENXIO);
  2238. }
  2239. m->m_pkthdr.csum_flags = csum_flags;
  2240. if (csum_flags &
  2241. (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
  2242. CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
  2243. counter_u64_add(sc->vxl_stats.txcsum, 1);
  2244. if (csum_flags & CSUM_INNER_TSO)
  2245. counter_u64_add(sc->vxl_stats.tso, 1);
  2246. }
  2247. } else if (ntohs(dstport) != V_zero_checksum_port) {
  2248. struct udphdr *hdr = mtodo(m, sizeof(struct ip6_hdr));
  2249. hdr->uh_sum = in6_cksum_pseudo(ip6,
  2250. m->m_pkthdr.len - sizeof(struct ip6_hdr), IPPROTO_UDP, 0);
  2251. m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
  2252. m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
  2253. }
  2254. error = ip6_output(m, NULL, ro, 0, sc->vxl_im6o, NULL, NULL);
  2255. if (error == 0) {
  2256. if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
  2257. if_inc_counter(ifp, IFCOUNTER_OBYTES, plen);
  2258. if (mcast != 0)
  2259. if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
  2260. } else
  2261. if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
  2262. return (error);
  2263. #else
  2264. m_freem(m);
  2265. return (ENOTSUP);
  2266. #endif
  2267. }
  2268. #define MTAG_VXLAN_LOOP 0x7876706c /* vxlp */
  2269. static int
  2270. vxlan_transmit(struct ifnet *ifp, struct mbuf *m)
  2271. {
  2272. struct rm_priotracker tracker;
  2273. union vxlan_sockaddr vxlsa;
  2274. struct vxlan_softc *sc;
  2275. struct vxlan_ftable_entry *fe;
  2276. struct ifnet *mcifp;
  2277. struct ether_header *eh;
  2278. int ipv4, error;
  2279. sc = ifp->if_softc;
  2280. eh = mtod(m, struct ether_header *);
  2281. fe = NULL;
  2282. mcifp = NULL;
  2283. ETHER_BPF_MTAP(ifp, m);
  2284. VXLAN_RLOCK(sc, &tracker);
  2285. M_SETFIB(m, sc->vxl_fibnum);
  2286. if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
  2287. VXLAN_RUNLOCK(sc, &tracker);
  2288. m_freem(m);
  2289. return (ENETDOWN);
  2290. }
  2291. if (__predict_false(if_tunnel_check_nesting(ifp, m, MTAG_VXLAN_LOOP,
  2292. max_vxlan_nesting) != 0)) {
  2293. VXLAN_RUNLOCK(sc, &tracker);
  2294. m_freem(m);
  2295. if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
  2296. return (ELOOP);
  2297. }
  2298. if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
  2299. fe = vxlan_ftable_entry_lookup(sc, eh->ether_dhost);
  2300. if (fe == NULL)
  2301. fe = &sc->vxl_default_fe;
  2302. vxlan_sockaddr_copy(&vxlsa, &fe->vxlfe_raddr.sa);
  2303. ipv4 = VXLAN_SOCKADDR_IS_IPV4(&vxlsa) != 0;
  2304. if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
  2305. mcifp = vxlan_multicast_if_ref(sc, ipv4);
  2306. VXLAN_ACQUIRE(sc);
  2307. VXLAN_RUNLOCK(sc, &tracker);
  2308. if (ipv4 != 0)
  2309. error = vxlan_encap4(sc, &vxlsa, m);
  2310. else
  2311. error = vxlan_encap6(sc, &vxlsa, m);
  2312. vxlan_release(sc);
  2313. if (mcifp != NULL)
  2314. if_rele(mcifp);
  2315. return (error);
  2316. }
  2317. static void
  2318. vxlan_qflush(struct ifnet *ifp __unused)
  2319. {
  2320. }
  2321. static bool
  2322. vxlan_rcv_udp_packet(struct mbuf *m, int offset, struct inpcb *inpcb,
  2323. const struct sockaddr *srcsa, void *xvso)
  2324. {
  2325. struct vxlan_socket *vso;
  2326. struct vxlan_header *vxh, vxlanhdr;
  2327. uint32_t vni;
  2328. int error __unused;
  2329. M_ASSERTPKTHDR(m);
  2330. vso = xvso;
  2331. offset += sizeof(struct udphdr);
  2332. if (m->m_pkthdr.len < offset + sizeof(struct vxlan_header))
  2333. goto out;
  2334. if (__predict_false(m->m_len < offset + sizeof(struct vxlan_header))) {
  2335. m_copydata(m, offset, sizeof(struct vxlan_header),
  2336. (caddr_t) &vxlanhdr);
  2337. vxh = &vxlanhdr;
  2338. } else
  2339. vxh = mtodo(m, offset);
  2340. /*
  2341. * Drop if there is a reserved bit set in either the flags or VNI
  2342. * fields of the header. This goes against the specification, but
  2343. * a bit set may indicate an unsupported new feature. This matches
  2344. * the behavior of the Linux implementation.
  2345. */
  2346. if (vxh->vxlh_flags != htonl(VXLAN_HDR_FLAGS_VALID_VNI) ||
  2347. vxh->vxlh_vni & ~VXLAN_VNI_MASK)
  2348. goto out;
  2349. vni = ntohl(vxh->vxlh_vni) >> VXLAN_HDR_VNI_SHIFT;
  2350. /* Adjust to the start of the inner Ethernet frame. */
  2351. m_adj_decap(m, offset + sizeof(struct vxlan_header));
  2352. error = vxlan_input(vso, vni, &m, srcsa);
  2353. MPASS(error != 0 || m == NULL);
  2354. out:
  2355. if (m != NULL)
  2356. m_freem(m);
  2357. return (true);
  2358. }
  2359. static int
  2360. vxlan_input(struct vxlan_socket *vso, uint32_t vni, struct mbuf **m0,
  2361. const struct sockaddr *sa)
  2362. {
  2363. struct vxlan_softc *sc;
  2364. struct ifnet *ifp;
  2365. struct mbuf *m;
  2366. struct ether_header *eh;
  2367. int error;
  2368. m = *m0;
  2369. if (m->m_pkthdr.len < ETHER_HDR_LEN)
  2370. return (EINVAL);
  2371. sc = vxlan_socket_lookup_softc(vso, vni);
  2372. if (sc == NULL)
  2373. return (ENOENT);
  2374. ifp = sc->vxl_ifp;
  2375. if (m->m_len < ETHER_HDR_LEN &&
  2376. (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
  2377. *m0 = NULL;
  2378. error = ENOBUFS;
  2379. goto out;
  2380. }
  2381. eh = mtod(m, struct ether_header *);
  2382. if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
  2383. error = ENETDOWN;
  2384. goto out;
  2385. } else if (ifp == m->m_pkthdr.rcvif) {
  2386. /* XXX Does not catch more complex loops. */
  2387. error = EDEADLK;
  2388. goto out;
  2389. }
  2390. if (sc->vxl_flags & VXLAN_FLAG_LEARN)
  2391. vxlan_ftable_learn(sc, sa, eh->ether_shost);
  2392. m_clrprotoflags(m);
  2393. m->m_pkthdr.rcvif = ifp;
  2394. M_SETFIB(m, ifp->if_fib);
  2395. if (((ifp->if_capenable & IFCAP_RXCSUM &&
  2396. m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC) ||
  2397. (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
  2398. !(m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)))) {
  2399. uint32_t csum_flags = 0;
  2400. if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)
  2401. csum_flags |= CSUM_L3_CALC;
  2402. if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_VALID)
  2403. csum_flags |= CSUM_L3_VALID;
  2404. if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_CALC)
  2405. csum_flags |= CSUM_L4_CALC;
  2406. if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_VALID)
  2407. csum_flags |= CSUM_L4_VALID;
  2408. m->m_pkthdr.csum_flags = csum_flags;
  2409. counter_u64_add(sc->vxl_stats.rxcsum, 1);
  2410. } else {
  2411. /* clear everything */
  2412. m->m_pkthdr.csum_flags = 0;
  2413. m->m_pkthdr.csum_data = 0;
  2414. }
  2415. if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
  2416. (*ifp->if_input)(ifp, m);
  2417. *m0 = NULL;
  2418. error = 0;
  2419. out:
  2420. vxlan_release(sc);
  2421. return (error);
  2422. }
  2423. static void
  2424. vxlan_stats_alloc(struct vxlan_softc *sc)
  2425. {
  2426. struct vxlan_statistics *stats = &sc->vxl_stats;
  2427. stats->txcsum = counter_u64_alloc(M_WAITOK);
  2428. stats->tso = counter_u64_alloc(M_WAITOK);
  2429. stats->rxcsum = counter_u64_alloc(M_WAITOK);
  2430. }
  2431. static void
  2432. vxlan_stats_free(struct vxlan_softc *sc)
  2433. {
  2434. struct vxlan_statistics *stats = &sc->vxl_stats;
  2435. counter_u64_free(stats->txcsum);
  2436. counter_u64_free(stats->tso);
  2437. counter_u64_free(stats->rxcsum);
  2438. }
  2439. static void
  2440. vxlan_set_default_config(struct vxlan_softc *sc)
  2441. {
  2442. sc->vxl_flags |= VXLAN_FLAG_LEARN;
  2443. sc->vxl_vni = VXLAN_VNI_MAX;
  2444. sc->vxl_ttl = IPDEFTTL;
  2445. if (!vxlan_tunable_int(sc, "legacy_port", vxlan_legacy_port)) {
  2446. sc->vxl_src_addr.in4.sin_port = htons(VXLAN_PORT);
  2447. sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_PORT);
  2448. } else {
  2449. sc->vxl_src_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
  2450. sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
  2451. }
  2452. sc->vxl_min_port = V_ipport_firstauto;
  2453. sc->vxl_max_port = V_ipport_lastauto;
  2454. sc->vxl_ftable_max = VXLAN_FTABLE_MAX;
  2455. sc->vxl_ftable_timeout = VXLAN_FTABLE_TIMEOUT;
  2456. }
  2457. static int
  2458. vxlan_set_user_config(struct vxlan_softc *sc, struct ifvxlanparam *vxlp)
  2459. {
  2460. #ifndef INET
  2461. if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR4 |
  2462. VXLAN_PARAM_WITH_REMOTE_ADDR4))
  2463. return (EAFNOSUPPORT);
  2464. #endif
  2465. #ifndef INET6
  2466. if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR6 |
  2467. VXLAN_PARAM_WITH_REMOTE_ADDR6))
  2468. return (EAFNOSUPPORT);
  2469. #else
  2470. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
  2471. int error = vxlan_sockaddr_in6_embedscope(&vxlp->vxlp_local_sa);
  2472. if (error)
  2473. return (error);
  2474. }
  2475. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
  2476. int error = vxlan_sockaddr_in6_embedscope(
  2477. &vxlp->vxlp_remote_sa);
  2478. if (error)
  2479. return (error);
  2480. }
  2481. #endif
  2482. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_VNI) {
  2483. if (vxlan_check_vni(vxlp->vxlp_vni) == 0)
  2484. sc->vxl_vni = vxlp->vxlp_vni;
  2485. }
  2486. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR4) {
  2487. sc->vxl_src_addr.in4.sin_len = sizeof(struct sockaddr_in);
  2488. sc->vxl_src_addr.in4.sin_family = AF_INET;
  2489. sc->vxl_src_addr.in4.sin_addr =
  2490. vxlp->vxlp_local_sa.in4.sin_addr;
  2491. } else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
  2492. sc->vxl_src_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
  2493. sc->vxl_src_addr.in6.sin6_family = AF_INET6;
  2494. sc->vxl_src_addr.in6.sin6_addr =
  2495. vxlp->vxlp_local_sa.in6.sin6_addr;
  2496. }
  2497. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR4) {
  2498. sc->vxl_dst_addr.in4.sin_len = sizeof(struct sockaddr_in);
  2499. sc->vxl_dst_addr.in4.sin_family = AF_INET;
  2500. sc->vxl_dst_addr.in4.sin_addr =
  2501. vxlp->vxlp_remote_sa.in4.sin_addr;
  2502. } else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
  2503. sc->vxl_dst_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
  2504. sc->vxl_dst_addr.in6.sin6_family = AF_INET6;
  2505. sc->vxl_dst_addr.in6.sin6_addr =
  2506. vxlp->vxlp_remote_sa.in6.sin6_addr;
  2507. }
  2508. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_PORT)
  2509. sc->vxl_src_addr.in4.sin_port = htons(vxlp->vxlp_local_port);
  2510. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_PORT)
  2511. sc->vxl_dst_addr.in4.sin_port = htons(vxlp->vxlp_remote_port);
  2512. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_PORT_RANGE) {
  2513. if (vxlp->vxlp_min_port <= vxlp->vxlp_max_port) {
  2514. sc->vxl_min_port = vxlp->vxlp_min_port;
  2515. sc->vxl_max_port = vxlp->vxlp_max_port;
  2516. }
  2517. }
  2518. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_MULTICAST_IF)
  2519. strlcpy(sc->vxl_mc_ifname, vxlp->vxlp_mc_ifname, IFNAMSIZ);
  2520. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_TIMEOUT) {
  2521. if (vxlan_check_ftable_timeout(vxlp->vxlp_ftable_timeout) == 0)
  2522. sc->vxl_ftable_timeout = vxlp->vxlp_ftable_timeout;
  2523. }
  2524. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_MAX) {
  2525. if (vxlan_check_ftable_max(vxlp->vxlp_ftable_max) == 0)
  2526. sc->vxl_ftable_max = vxlp->vxlp_ftable_max;
  2527. }
  2528. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_TTL) {
  2529. if (vxlan_check_ttl(vxlp->vxlp_ttl) == 0)
  2530. sc->vxl_ttl = vxlp->vxlp_ttl;
  2531. }
  2532. if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LEARN) {
  2533. if (vxlp->vxlp_learn == 0)
  2534. sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
  2535. }
  2536. return (0);
  2537. }
  2538. static int
  2539. vxlan_set_reqcap(struct vxlan_softc *sc, struct ifnet *ifp, int reqcap)
  2540. {
  2541. int mask = reqcap ^ ifp->if_capenable;
  2542. /* Disable TSO if tx checksums are disabled. */
  2543. if (mask & IFCAP_TXCSUM && !(reqcap & IFCAP_TXCSUM) &&
  2544. reqcap & IFCAP_TSO4) {
  2545. reqcap &= ~IFCAP_TSO4;
  2546. if_printf(ifp, "tso4 disabled due to -txcsum.\n");
  2547. }
  2548. if (mask & IFCAP_TXCSUM_IPV6 && !(reqcap & IFCAP_TXCSUM_IPV6) &&
  2549. reqcap & IFCAP_TSO6) {
  2550. reqcap &= ~IFCAP_TSO6;
  2551. if_printf(ifp, "tso6 disabled due to -txcsum6.\n");
  2552. }
  2553. /* Do not enable TSO if tx checksums are disabled. */
  2554. if (mask & IFCAP_TSO4 && reqcap & IFCAP_TSO4 &&
  2555. !(reqcap & IFCAP_TXCSUM)) {
  2556. if_printf(ifp, "enable txcsum first.\n");
  2557. return (EAGAIN);
  2558. }
  2559. if (mask & IFCAP_TSO6 && reqcap & IFCAP_TSO6 &&
  2560. !(reqcap & IFCAP_TXCSUM_IPV6)) {
  2561. if_printf(ifp, "enable txcsum6 first.\n");
  2562. return (EAGAIN);
  2563. }
  2564. sc->vxl_reqcap = reqcap;
  2565. return (0);
  2566. }
  2567. /*
  2568. * A VXLAN interface inherits the capabilities of the vxlandev or the interface
  2569. * hosting the vxlanlocal address.
  2570. */
  2571. static void
  2572. vxlan_set_hwcaps(struct vxlan_softc *sc)
  2573. {
  2574. struct epoch_tracker et;
  2575. struct ifnet *p;
  2576. struct ifaddr *ifa;
  2577. u_long hwa;
  2578. int cap, ena;
  2579. bool rel;
  2580. struct ifnet *ifp = sc->vxl_ifp;
  2581. /* reset caps */
  2582. ifp->if_capabilities &= VXLAN_BASIC_IFCAPS;
  2583. ifp->if_capenable &= VXLAN_BASIC_IFCAPS;
  2584. ifp->if_hwassist = 0;
  2585. NET_EPOCH_ENTER(et);
  2586. CURVNET_SET(ifp->if_vnet);
  2587. rel = false;
  2588. p = NULL;
  2589. if (sc->vxl_mc_ifname[0] != '\0') {
  2590. rel = true;
  2591. p = ifunit_ref(sc->vxl_mc_ifname);
  2592. } else if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
  2593. if (sc->vxl_src_addr.sa.sa_family == AF_INET) {
  2594. struct sockaddr_in in4 = sc->vxl_src_addr.in4;
  2595. in4.sin_port = 0;
  2596. ifa = ifa_ifwithaddr((struct sockaddr *)&in4);
  2597. if (ifa != NULL)
  2598. p = ifa->ifa_ifp;
  2599. } else if (sc->vxl_src_addr.sa.sa_family == AF_INET6) {
  2600. struct sockaddr_in6 in6 = sc->vxl_src_addr.in6;
  2601. in6.sin6_port = 0;
  2602. ifa = ifa_ifwithaddr((struct sockaddr *)&in6);
  2603. if (ifa != NULL)
  2604. p = ifa->ifa_ifp;
  2605. }
  2606. }
  2607. if (p == NULL)
  2608. goto done;
  2609. cap = ena = hwa = 0;
  2610. /* checksum offload */
  2611. if (p->if_capabilities & IFCAP_VXLAN_HWCSUM)
  2612. cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
  2613. if (p->if_capenable & IFCAP_VXLAN_HWCSUM) {
  2614. ena |= sc->vxl_reqcap & p->if_capenable &
  2615. (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
  2616. if (ena & IFCAP_TXCSUM) {
  2617. if (p->if_hwassist & CSUM_INNER_IP)
  2618. hwa |= CSUM_IP;
  2619. if (p->if_hwassist & CSUM_INNER_IP_UDP)
  2620. hwa |= CSUM_IP_UDP;
  2621. if (p->if_hwassist & CSUM_INNER_IP_TCP)
  2622. hwa |= CSUM_IP_TCP;
  2623. }
  2624. if (ena & IFCAP_TXCSUM_IPV6) {
  2625. if (p->if_hwassist & CSUM_INNER_IP6_UDP)
  2626. hwa |= CSUM_IP6_UDP;
  2627. if (p->if_hwassist & CSUM_INNER_IP6_TCP)
  2628. hwa |= CSUM_IP6_TCP;
  2629. }
  2630. }
  2631. /* hardware TSO */
  2632. if (p->if_capabilities & IFCAP_VXLAN_HWTSO) {
  2633. cap |= p->if_capabilities & IFCAP_TSO;
  2634. if (p->if_hw_tsomax > IP_MAXPACKET - ifp->if_hdrlen)
  2635. ifp->if_hw_tsomax = IP_MAXPACKET - ifp->if_hdrlen;
  2636. else
  2637. ifp->if_hw_tsomax = p->if_hw_tsomax;
  2638. /* XXX: tsomaxsegcount decrement is cxgbe specific */
  2639. ifp->if_hw_tsomaxsegcount = p->if_hw_tsomaxsegcount - 1;
  2640. ifp->if_hw_tsomaxsegsize = p->if_hw_tsomaxsegsize;
  2641. }
  2642. if (p->if_capenable & IFCAP_VXLAN_HWTSO) {
  2643. ena |= sc->vxl_reqcap & p->if_capenable & IFCAP_TSO;
  2644. if (ena & IFCAP_TSO) {
  2645. if (p->if_hwassist & CSUM_INNER_IP_TSO)
  2646. hwa |= CSUM_IP_TSO;
  2647. if (p->if_hwassist & CSUM_INNER_IP6_TSO)
  2648. hwa |= CSUM_IP6_TSO;
  2649. }
  2650. }
  2651. ifp->if_capabilities |= cap;
  2652. ifp->if_capenable |= ena;
  2653. ifp->if_hwassist |= hwa;
  2654. if (rel)
  2655. if_rele(p);
  2656. done:
  2657. CURVNET_RESTORE();
  2658. NET_EPOCH_EXIT(et);
  2659. }
  2660. static int
  2661. vxlan_clone_create(struct if_clone *ifc, char *name, size_t len,
  2662. struct ifc_data *ifd, struct ifnet **ifpp)
  2663. {
  2664. struct vxlan_softc *sc;
  2665. struct ifnet *ifp;
  2666. struct ifvxlanparam vxlp;
  2667. int error;
  2668. sc = malloc(sizeof(struct vxlan_softc), M_VXLAN, M_WAITOK | M_ZERO);
  2669. sc->vxl_unit = ifd->unit;
  2670. sc->vxl_fibnum = curthread->td_proc->p_fibnum;
  2671. vxlan_set_default_config(sc);
  2672. if (ifd->params != NULL) {
  2673. error = ifc_copyin(ifd, &vxlp, sizeof(vxlp));
  2674. if (error)
  2675. goto fail;
  2676. error = vxlan_set_user_config(sc, &vxlp);
  2677. if (error)
  2678. goto fail;
  2679. }
  2680. vxlan_stats_alloc(sc);
  2681. ifp = if_alloc(IFT_ETHER);
  2682. sc->vxl_ifp = ifp;
  2683. rm_init(&sc->vxl_lock, "vxlanrm");
  2684. callout_init_rw(&sc->vxl_callout, &sc->vxl_lock, 0);
  2685. sc->vxl_port_hash_key = arc4random();
  2686. vxlan_ftable_init(sc);
  2687. vxlan_sysctl_setup(sc);
  2688. ifp->if_softc = sc;
  2689. if_initname(ifp, vxlan_name, ifd->unit);
  2690. ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  2691. ifp->if_init = vxlan_init;
  2692. ifp->if_ioctl = vxlan_ioctl;
  2693. ifp->if_transmit = vxlan_transmit;
  2694. ifp->if_qflush = vxlan_qflush;
  2695. ifp->if_capabilities = VXLAN_BASIC_IFCAPS;
  2696. ifp->if_capenable = VXLAN_BASIC_IFCAPS;
  2697. sc->vxl_reqcap = -1;
  2698. vxlan_set_hwcaps(sc);
  2699. ifmedia_init(&sc->vxl_media, 0, vxlan_media_change, vxlan_media_status);
  2700. ifmedia_add(&sc->vxl_media, IFM_ETHER | IFM_AUTO, 0, NULL);
  2701. ifmedia_set(&sc->vxl_media, IFM_ETHER | IFM_AUTO);
  2702. ether_gen_addr(ifp, &sc->vxl_hwaddr);
  2703. ether_ifattach(ifp, sc->vxl_hwaddr.octet);
  2704. ifp->if_baudrate = 0;
  2705. VXLAN_WLOCK(sc);
  2706. vxlan_setup_interface_hdrlen(sc);
  2707. VXLAN_WUNLOCK(sc);
  2708. *ifpp = ifp;
  2709. return (0);
  2710. fail:
  2711. free(sc, M_VXLAN);
  2712. return (error);
  2713. }
  2714. static int
  2715. vxlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
  2716. {
  2717. struct vxlan_softc *sc;
  2718. sc = ifp->if_softc;
  2719. vxlan_teardown(sc);
  2720. vxlan_ftable_flush(sc, 1);
  2721. ether_ifdetach(ifp);
  2722. if_free(ifp);
  2723. ifmedia_removeall(&sc->vxl_media);
  2724. vxlan_ftable_fini(sc);
  2725. vxlan_sysctl_destroy(sc);
  2726. rm_destroy(&sc->vxl_lock);
  2727. vxlan_stats_free(sc);
  2728. free(sc, M_VXLAN);
  2729. return (0);
  2730. }
  2731. /* BMV: Taken from if_bridge. */
  2732. static uint32_t
  2733. vxlan_mac_hash(struct vxlan_softc *sc, const uint8_t *addr)
  2734. {
  2735. uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->vxl_ftable_hash_key;
  2736. b += addr[5] << 8;
  2737. b += addr[4];
  2738. a += addr[3] << 24;
  2739. a += addr[2] << 16;
  2740. a += addr[1] << 8;
  2741. a += addr[0];
  2742. /*
  2743. * The following hash function is adapted from "Hash Functions" by Bob Jenkins
  2744. * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
  2745. */
  2746. #define mix(a, b, c) \
  2747. do { \
  2748. a -= b; a -= c; a ^= (c >> 13); \
  2749. b -= c; b -= a; b ^= (a << 8); \
  2750. c -= a; c -= b; c ^= (b >> 13); \
  2751. a -= b; a -= c; a ^= (c >> 12); \
  2752. b -= c; b -= a; b ^= (a << 16); \
  2753. c -= a; c -= b; c ^= (b >> 5); \
  2754. a -= b; a -= c; a ^= (c >> 3); \
  2755. b -= c; b -= a; b ^= (a << 10); \
  2756. c -= a; c -= b; c ^= (b >> 15); \
  2757. } while (0)
  2758. mix(a, b, c);
  2759. #undef mix
  2760. return (c);
  2761. }
static int
vxlan_media_change(struct ifnet *ifp)
{

        /* Ignore. */
        return (0);
}

static void
vxlan_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{

        ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER | IFM_FDX;
}
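
/*
 * Helpers for the union vxlan_sockaddr type: comparison, copying, and
 * classification (wildcard, multicast, supported address family), plus
 * IPv6 scope embedding, for both IPv4 and IPv6 endpoints.
 */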
static int
vxlan_sockaddr_cmp(const union vxlan_sockaddr *vxladdr,
    const struct sockaddr *sa)
{

        return (bcmp(&vxladdr->sa, sa, vxladdr->sa.sa_len));
}

static void
vxlan_sockaddr_copy(union vxlan_sockaddr *vxladdr,
    const struct sockaddr *sa)
{

        MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
        bzero(vxladdr, sizeof(*vxladdr));

        if (sa->sa_family == AF_INET) {
                vxladdr->in4 = *satoconstsin(sa);
                vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
        } else if (sa->sa_family == AF_INET6) {
                vxladdr->in6 = *satoconstsin6(sa);
                vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
        }
}

static int
vxlan_sockaddr_in_equal(const union vxlan_sockaddr *vxladdr,
    const struct sockaddr *sa)
{
        int equal;

        if (sa->sa_family == AF_INET) {
                const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
                equal = in4->s_addr == vxladdr->in4.sin_addr.s_addr;
        } else if (sa->sa_family == AF_INET6) {
                const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
                equal = IN6_ARE_ADDR_EQUAL(in6, &vxladdr->in6.sin6_addr);
        } else
                equal = 0;

        return (equal);
}

static void
vxlan_sockaddr_in_copy(union vxlan_sockaddr *vxladdr,
    const struct sockaddr *sa)
{

        MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);

        if (sa->sa_family == AF_INET) {
                const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
                vxladdr->in4.sin_family = AF_INET;
                vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
                vxladdr->in4.sin_addr = *in4;
        } else if (sa->sa_family == AF_INET6) {
                const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
                vxladdr->in6.sin6_family = AF_INET6;
                vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
                vxladdr->in6.sin6_addr = *in6;
        }
}

static int
vxlan_sockaddr_supported(const union vxlan_sockaddr *vxladdr, int unspec)
{
        const struct sockaddr *sa;
        int supported;

        sa = &vxladdr->sa;
        supported = 0;

        if (sa->sa_family == AF_UNSPEC && unspec != 0) {
                supported = 1;
        } else if (sa->sa_family == AF_INET) {
#ifdef INET
                supported = 1;
#endif
        } else if (sa->sa_family == AF_INET6) {
#ifdef INET6
                supported = 1;
#endif
        }

        return (supported);
}

static int
vxlan_sockaddr_in_any(const union vxlan_sockaddr *vxladdr)
{
        const struct sockaddr *sa;
        int any;

        sa = &vxladdr->sa;

        if (sa->sa_family == AF_INET) {
                const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
                any = in4->s_addr == INADDR_ANY;
        } else if (sa->sa_family == AF_INET6) {
                const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
                any = IN6_IS_ADDR_UNSPECIFIED(in6);
        } else
                any = -1;

        return (any);
}

static int
vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *vxladdr)
{
        const struct sockaddr *sa;
        int mc;

        sa = &vxladdr->sa;

        if (sa->sa_family == AF_INET) {
                const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
                mc = IN_MULTICAST(ntohl(in4->s_addr));
        } else if (sa->sa_family == AF_INET6) {
                const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
                mc = IN6_IS_ADDR_MULTICAST(in6);
        } else
                mc = -1;

        return (mc);
}

static int
vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *vxladdr)
{
        int error;

        MPASS(VXLAN_SOCKADDR_IS_IPV6(vxladdr));
#ifdef INET6
        error = sa6_embedscope(&vxladdr->in6, V_ip6_use_defzone);
#else
        error = EAFNOSUPPORT;
#endif

        return (error);
}
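
/*
 * The configuration may only be changed while the interface is neither
 * running nor in the middle of an init or teardown.
 */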
static int
vxlan_can_change_config(struct vxlan_softc *sc)
{
        struct ifnet *ifp;

        ifp = sc->vxl_ifp;
        VXLAN_LOCK_ASSERT(sc);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                return (0);
        if (sc->vxl_flags & (VXLAN_FLAG_INIT | VXLAN_FLAG_TEARDOWN))
                return (0);

        return (1);
}
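
/*
 * Parameter validators; each returns non-zero when the value is out of
 * range.
 */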
static int
vxlan_check_vni(uint32_t vni)
{

        return (vni >= VXLAN_VNI_MAX);
}

static int
vxlan_check_ttl(int ttl)
{

        return (ttl > MAXTTL);
}

static int
vxlan_check_ftable_timeout(uint32_t timeout)
{

        return (timeout > VXLAN_FTABLE_MAX_TIMEOUT);
}

static int
vxlan_check_ftable_max(uint32_t max)
{

        return (max > VXLAN_FTABLE_MAX);
}
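
/*
 * Create the per-interface sysctl tree under net.link.vxlan.<unit>,
 * exposing the forwarding table limits, a table dump, and the offload
 * statistics counters.
 */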
static void
vxlan_sysctl_setup(struct vxlan_softc *sc)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *node;
        struct vxlan_statistics *stats;
        char namebuf[8];

        ctx = &sc->vxl_sysctl_ctx;
        stats = &sc->vxl_stats;
        snprintf(namebuf, sizeof(namebuf), "%d", sc->vxl_unit);

        sysctl_ctx_init(ctx);
        sc->vxl_sysctl_node = SYSCTL_ADD_NODE(ctx,
            SYSCTL_STATIC_CHILDREN(_net_link_vxlan), OID_AUTO, namebuf,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");

        node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
            OID_AUTO, "ftable", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
        SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "count",
            CTLFLAG_RD, &sc->vxl_ftable_cnt, 0,
            "Number of entries in forwarding table");
        SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "max",
            CTLFLAG_RD, &sc->vxl_ftable_max, 0,
            "Maximum number of entries allowed in forwarding table");
        SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "timeout",
            CTLFLAG_RD, &sc->vxl_ftable_timeout, 0,
            "Number of seconds between prunes of the forwarding table");
        SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "dump",
            CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
            sc, 0, vxlan_ftable_sysctl_dump, "A",
            "Dump the forwarding table entries");

        node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
            OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
        SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
            "ftable_nospace", CTLFLAG_RD, &stats->ftable_nospace, 0,
            "Forwarding table reached maximum entries");
        SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
            "ftable_lock_upgrade_failed", CTLFLAG_RD,
            &stats->ftable_lock_upgrade_failed, 0,
            "Forwarding table update required lock upgrade");
        SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "txcsum",
            CTLFLAG_RD, &stats->txcsum,
            "# of times hardware assisted with tx checksum");
        SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "tso",
            CTLFLAG_RD, &stats->tso, "# of times hardware assisted with TSO");
        SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "rxcsum",
            CTLFLAG_RD, &stats->rxcsum,
            "# of times hardware assisted with rx checksum");
}

static void
vxlan_sysctl_destroy(struct vxlan_softc *sc)
{

        sysctl_ctx_free(&sc->vxl_sysctl_ctx);
        sc->vxl_sysctl_node = NULL;
}
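
/*
 * Fetch a per-unit integer tunable, named net.link.vxlan.<unit>.<knob>,
 * falling back to the supplied default when the tunable is not set.
 */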
static int
vxlan_tunable_int(struct vxlan_softc *sc, const char *knob, int def)
{
        char path[64];

        snprintf(path, sizeof(path), "net.link.vxlan.%d.%s",
            sc->vxl_unit, knob);

        TUNABLE_INT_FETCH(path, &def);

        return (def);
}
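
/*
 * ifnet departure handler: when a multicast-capable interface goes away
 * (and is not merely being renamed), collect the vxlan interfaces affected
 * on each vxlan socket and tear them down.
 */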
static void
vxlan_ifdetach_event(void *arg __unused, struct ifnet *ifp)
{
        struct vxlan_softc_head list;
        struct vxlan_socket *vso;
        struct vxlan_softc *sc, *tsc;

        LIST_INIT(&list);

        if (ifp->if_flags & IFF_RENAMING)
                return;
        if ((ifp->if_flags & IFF_MULTICAST) == 0)
                return;

        VXLAN_LIST_LOCK();
        LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry)
                vxlan_socket_ifdetach(vso, ifp, &list);
        VXLAN_LIST_UNLOCK();

        LIST_FOREACH_SAFE(sc, &list, vxl_ifdetach_list, tsc) {
                LIST_REMOVE(sc, vxl_ifdetach_list);

                sx_xlock(&vxlan_sx);
                VXLAN_WLOCK(sc);
                if (sc->vxl_flags & VXLAN_FLAG_INIT)
                        vxlan_init_wait(sc);
                vxlan_teardown_locked(sc);
                sx_xunlock(&vxlan_sx);
        }
}
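
/*
 * Module load/unload: set up (and later tear down) the global socket list
 * and its mutex, the ifnet departure event handler, and the vxlan
 * interface cloner.
 */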
static void
vxlan_load(void)
{

        mtx_init(&vxlan_list_mtx, "vxlan list", NULL, MTX_DEF);
        LIST_INIT(&vxlan_socket_list);
        vxlan_ifdetach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
            vxlan_ifdetach_event, NULL, EVENTHANDLER_PRI_ANY);

        struct if_clone_addreq req = {
                .create_f = vxlan_clone_create,
                .destroy_f = vxlan_clone_destroy,
                .flags = IFC_F_AUTOUNIT,
        };
        vxlan_cloner = ifc_attach_cloner(vxlan_name, &req);
}

static void
vxlan_unload(void)
{

        EVENTHANDLER_DEREGISTER(ifnet_departure_event,
            vxlan_ifdetach_event_tag);
        ifc_detach_cloner(vxlan_cloner);
        mtx_destroy(&vxlan_list_mtx);

        MPASS(LIST_EMPTY(&vxlan_socket_list));
}

static int
vxlan_modevent(module_t mod, int type, void *unused)
{
        int error;

        error = 0;

        switch (type) {
        case MOD_LOAD:
                vxlan_load();
                break;
        case MOD_UNLOAD:
                vxlan_unload();
                break;
        default:
                error = ENOTSUP;
                break;
        }

        return (error);
}

static moduledata_t vxlan_mod = {
        "if_vxlan",
        vxlan_modevent,
        0
};

DECLARE_MODULE(if_vxlan, vxlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vxlan, 1);