/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif

#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE	0x80000000
#define TUN_VNET_BE	0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int	count;    /* Number of addrs. Zero means disabled */
	u32		mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) so it can serve as one transmit queue for the tuntap device.
 * The sock_fprog and tap_filter are kept in tun_struct since they are used
 * for filtering on the netdevice, not on a specific queue (at least I didn't
 * see a requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int		numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			   NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

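/*
 * Illustration (userspace, not part of the driver): the TUN_VNET_LE/BE bits
 * consulted by the helpers above are driven by the TUNSETVNETLE and
 * TUNSETVNETBE ioctls, which take a pointer to an int. A minimal sketch with
 * a hypothetical helper name, assuming an already-open tun fd; error
 * handling trimmed.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int example_force_vnet_le(int tun_fd)
{
	int le = 1;

	/* Make virtio_net_hdr fields little-endian regardless of host order. */
	if (ioctl(tun_fd, TUNSETVNETLE, &le) < 0)
		return -1;
	/* TUNSETVNETBE is only honoured with CONFIG_TUN_VNET_CROSS_LE;
	 * otherwise the driver returns -EINVAL (see tun_set_vnet_be()). */
	return 0;
}
#endif
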
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash first. The reason that
 * we do not check rxq no. is because some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow was
 * sent. As the userspace application moves between processors, we may
 * get a different rxq no. here. If we cannot get the rxhash, then we
 * would hope the rxq no. may help here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	return txq;
}

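/*
 * Worked example for the multiply-and-shift above: it maps a 32-bit hash
 * uniformly onto [0, numqueues) without a divide, because
 * ((u64)hash * numqueues) >> 32 == floor(hash / 2^32 * numqueues).
 * For hash = 0x80000000 and numqueues = 4: (2^31 * 4) >> 32 = 2, i.e. a
 * hash halfway through the 32-bit range lands on queue index 2 of 0..3.
 */
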
static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

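/*
 * Illustration (userspace, not part of the driver): the steering program
 * consulted by tun_ebpf_select_queue() above is installed with the
 * TUNSETSTEERINGEBPF ioctl, which takes a pointer to the fd of a loaded
 * socket-filter-type BPF program; the program's return value is reduced
 * modulo numqueues. A minimal sketch with a hypothetical helper name,
 * assuming prog_fd refers to an already-loaded program; error handling
 * trimmed. The sibling ioctl TUNSETFILTEREBPF installs the drop/trim
 * filter consulted by run_ebpf_filter() later in this file.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int example_attach_steering(int tun_fd, int prog_fd)
{
	if (ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd) < 0)
		return -1;
	/* Passing an fd of -1 detaches the steering program again. */
	return 0;
}
#endif
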
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err < 0)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

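/*
 * Illustration (userspace, not part of the driver): queues land in
 * tun_attach() / __tun_detach() above through the TUNSETQUEUE ioctl on a
 * device created with IFF_MULTI_QUEUE. A minimal sketch with a hypothetical
 * helper name; error handling trimmed.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int example_toggle_queue(int queue_fd, int attach)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	/* IFF_ATTACH_QUEUE re-enables the queue; IFF_DETACH_QUEUE disables
	 * it (the fd stays open and parks on tun->disabled). */
	ifr.ifr_flags = attach ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(queue_fd, TUNSETQUEUE, &ifr);
}
#endif
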
static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

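/*
 * Illustration (userspace, not part of the driver): update_filter() above
 * parses a struct tun_filter followed by an array of MAC addresses, as sent
 * by the TUNSETTXFILTER ioctl. A minimal sketch installing one exact-match
 * address, using a hypothetical helper name; error handling trimmed.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

static int example_set_mac_filter(int tap_fd, const unsigned char mac[ETH_ALEN])
{
	struct {
		struct tun_filter uf;
		unsigned char addr[1][ETH_ALEN];
	} req;

	memset(&req, 0, sizeof(req));
	req.uf.count = 1;			/* one exact-match entry */
	memcpy(req.addr[0], mac, ETH_ALEN);
	return ioctl(tap_fd, TUNSETTXFILTER, &req);
}
#endif
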
/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;

			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *old_prog;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = rtnl_dereference(tun->xdp_prog);
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = tun_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
};

static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}

static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int drops = 0;
	int cnt = n;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

resample:
	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);
	if (unlikely(!tfile))
		goto resample;

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into the lowest bit so the consumer
		 * can tell an XDP buffer apart from an sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			this_cpu_inc(tun->pcpu_stats->tx_dropped);
			xdp_return_frame_rx_napi(xdp);
			drops++;
		}
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return cnt - drops;
}

static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = convert_to_xdp_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
}

static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;

		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}

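/*
 * Illustration (userspace, not part of the driver): tun_net_init() above is
 * reached when a device is created through the TUNSETIFF ioctl. A minimal
 * sketch creating a TAP device without packet info, using a hypothetical
 * helper name; error handling trimmed.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int example_open_tap(char name[IFNAMSIZ])
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	/* IFF_TAP selects the ether_setup() branch above; IFF_TUN would
	 * take the ARPHRD_NONE point-to-point branch instead. */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	strncpy(name, ifr.ifr_name, IFNAMSIZ);	/* kernel-chosen name */
	return fd;
}
#endif
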
static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}

/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT to be raised by either here or
	 * tun_sock_write_space(). Then process could get notification
	 * after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}

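/*
 * Illustration (userspace, not part of the driver): the EPOLLIN/EPOLLOUT
 * bits computed in tun_chr_poll() above are what a poll()-driven reader
 * observes. A minimal sketch of a read loop with a hypothetical helper
 * name; error handling trimmed.
 */
#if 0
#include <poll.h>
#include <unistd.h>

static void example_read_loop(int tun_fd, char *buf, size_t buflen)
{
	struct pollfd pfd = { .fd = tun_fd, .events = POLLIN };

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		if (pfd.revents & POLLIN) {
			/* One read() returns one full packet (preceded by a
			 * virtio_net_hdr if IFF_VNET_HDR was requested). */
			ssize_t n = read(tun_fd, buf, buflen);

			if (n < 0)
				break;
		}
	}
}
#endif
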
  1200. static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
  1201. size_t len,
  1202. const struct iov_iter *it)
  1203. {
  1204. struct sk_buff *skb;
  1205. size_t linear;
  1206. int err;
  1207. int i;
  1208. if (it->nr_segs > MAX_SKB_FRAGS + 1)
  1209. return ERR_PTR(-ENOMEM);
  1210. local_bh_disable();
  1211. skb = napi_get_frags(&tfile->napi);
  1212. local_bh_enable();
  1213. if (!skb)
  1214. return ERR_PTR(-ENOMEM);
  1215. linear = iov_iter_single_seg_count(it);
  1216. err = __skb_grow(skb, linear);
  1217. if (err)
  1218. goto free;
  1219. skb->len = len;
  1220. skb->data_len = len - linear;
  1221. skb->truesize += skb->data_len;
  1222. for (i = 1; i < it->nr_segs; i++) {
  1223. struct page_frag *pfrag = &current->task_frag;
  1224. size_t fragsz = it->iov[i].iov_len;
  1225. if (fragsz == 0 || fragsz > PAGE_SIZE) {
  1226. err = -EINVAL;
  1227. goto free;
  1228. }
  1229. if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
  1230. err = -ENOMEM;
  1231. goto free;
  1232. }
  1233. skb_fill_page_desc(skb, i - 1, pfrag->page,
  1234. pfrag->offset, fragsz);
  1235. page_ref_inc(pfrag->page);
  1236. pfrag->offset += fragsz;
  1237. }
  1238. return skb;
  1239. free:
  1240. /* frees skb and all frags allocated with napi_alloc_frag() */
  1241. napi_free_frags(&tfile->napi);
  1242. return ERR_PTR(err);
  1243. }
/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
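/* Design note: batching defers netif_receive_skb() until either the
 * sender stops indicating MSG_MORE (e.g. a vhost-net sendmsg burst ends)
 * or rx_batched skbs have accumulated on sk_write_queue, amortizing the
 * per-packet cost of entering the stack. With rx_batched == 0 every
 * packet is delivered immediately.
 */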
static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue))) {
			skb_record_rx_queue(nskb, tfile->queue_index);
			netif_receive_skb(nskb);
		}
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}
static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}
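/* Fast path for small TAP packets: copy into a per-task page frag, run
 * the attached XDP program (if any) directly on that buffer, and only
 * build_skb() around it on XDP_PASS. XDP_REDIRECT/XDP_TX consume the
 * buffer and return NULL; *skb_xdp tells the caller to fall back to
 * generic XDP on the skb instead (GSO packet, or no program seen at the
 * first check).
 */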
static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	unsigned int delta = 0;
	char *buf;
	size_t copied;
	int err, pad = TUN_RX_PAD;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += TUN_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window between the check of xdp_prog above and
	 * here during which XDP may have been enabled. This should be
	 * rare, so for simplicity we do XDP on the skb in case the
	 * headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog)
		*skb_xdp = 1;
	else
		*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog && !*skb_xdp) {
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		xdp.data_hard_start = buf;
		xdp.data = buf + pad;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &tfile->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_REDIRECT:
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
			xdp_do_flush_map();
			if (err)
				goto err_redirect;
			rcu_read_unlock();
			local_bh_enable();
			return NULL;
		case XDP_TX:
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
			if (tun_xdp_tx(tun->dev, &xdp) < 0)
				goto err_redirect;
			rcu_read_unlock();
			local_bh_enable();
			return NULL;
		case XDP_PASS:
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(tun->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			goto err_xdp;
		}
	}

	skb = build_skb(buf, buflen);
	if (!skb) {
		rcu_read_unlock();
		local_bh_enable();
		return ERR_PTR(-ENOMEM);
	}

	skb_reserve(skb, pad - delta);
	skb_put(skb, len);
	skb_set_owner_w(skb, tfile->socket.sk);
	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	rcu_read_unlock();
	local_bh_enable();

	return skb;

err_redirect:
	put_page(alloc_frag->page);
err_xdp:
	rcu_read_unlock();
	local_bh_enable();
	this_cpu_inc(tun->pcpu_stats->rx_dropped);
	return NULL;
}
/* Get packet from user space buffer */
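/* Expected layout of each write()/sendmsg() buffer:
 *
 *	[struct tun_pi]			unless IFF_NO_PI is set
 *	[struct virtio_net_hdr]		if IFF_VNET_HDR is set; vnet_hdr_sz
 *					bytes are consumed, of which only
 *					sizeof(struct virtio_net_hdr) are read
 *	[packet payload]
 */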
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	struct tun_pcpu_stats *stats;
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* Packets that are not easy to process here (e.g. GSO or
		 * jumbo packets) are handled after the skb is created, by
		 * the generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for the skb.
			 * If zerocopy is enabled, then this layout will be
			 * overwritten by zerocopy_sg_from_iter().
			 */
			zerocopy = false;
		} else {
			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
			if (frags)
				mutex_unlock(&tfile->napi_mutex);
			return PTR_ERR(skb);
		}

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			err = -EFAULT;
drop:
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			kfree_skb(skb);
			if (frags) {
				tfile->napi.skb = NULL;
				mutex_unlock(&tfile->napi_mutex);
			}

			return err;
		}
	}
	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		if (frags) {
			tfile->napi.skb = NULL;
			mutex_unlock(&tfile->napi_mutex);
		}

		return -EINVAL;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (!frags)
			skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		uarg->callback(uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		local_bh_disable();
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				local_bh_enable();
				if (frags) {
					tfile->napi.skb = NULL;
					mutex_unlock(&tfile->napi_mutex);
				}
				return total_len;
			}
		}
		rcu_read_unlock();
		local_bh_enable();
	}

	/* Compute the costly rx hash only if needed for flow updates.
	 * There is a very small possibility of out-of-order delivery
	 * while switching queues; it is not worth optimizing for.
	 */
	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	rcu_read_lock();
	if (unlikely(!(tun->dev->flags & IFF_UP))) {
		err = -EIO;
		rcu_read_unlock();
		goto drop;
	}

	if (frags) {
		/* Exercise flow dissector code path. */
		u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			napi_free_frags(&tfile->napi);
			rcu_read_unlock();
			mutex_unlock(&tfile->napi_mutex);
			WARN_ON(1);
			return -ENOMEM;
		}

		local_bh_disable();
		napi_gro_frags(&tfile->napi);
		local_bh_enable();
		mutex_unlock(&tfile->napi_mutex);
	} else if (tfile->napi_enabled) {
		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
		int queue_len;

		spin_lock_bh(&queue->lock);
		__skb_queue_tail(queue, skb);
		queue_len = skb_queue_len(queue);
		spin_unlock(&queue->lock);

		if (!more || queue_len > NAPI_POLL_WEIGHT)
			napi_schedule(&tfile->napi);

		local_bh_enable();
	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
		tun_rx_batched(tun, tfile, skb, more);
	} else {
		netif_rx_ni(skb);
	}
	rcu_read_unlock();

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

	return total_len;
}
static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t result;

	if (!tun)
		return -EBADFD;

	result = tun_get_user(tun, tfile, NULL, from,
			      file->f_flags & O_NONBLOCK, false);

	tun_put(tun);
	return result;
}
static ssize_t tun_put_user_xdp(struct tun_struct *tun,
				struct tun_file *tfile,
				struct xdp_frame *xdp_frame,
				struct iov_iter *iter)
{
	int vnet_hdr_sz = 0;
	size_t size = xdp_frame->len;
	struct tun_pcpu_stats *stats;
	size_t ret;

	if (tun->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 };

		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
			return -EINVAL;
		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
			     sizeof(gso)))
			return -EFAULT;
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += ret;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return ret;
}
/* Put packet to the user space buffer */
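/* Data returned by each read()/recvmsg() mirrors the write-side layout:
 * an optional struct tun_pi (with TUN_PKT_STRIP set if the user buffer
 * is too small for the whole packet), an optional virtio_net_hdr, then
 * the packet itself, with any VLAN tag reinserted into the Ethernet
 * header on the way out.
 */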
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	struct tun_pcpu_stats *stats;
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;

		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true,
					    vlan_hlen)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct veth veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	/* caller is in process context */
	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += skb->len + vlan_hlen;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return total;
}
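/* Blocking consume from the tx ring. The ring is re-checked after
 * set_current_state(TASK_INTERRUPTIBLE), so a producer waking us between
 * the check and schedule() cannot be missed: the classic prepare-to-wait
 * pattern.
 */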
static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	void *ptr = NULL;
	int error = 0;

	ptr = ptr_ring_consume(&tfile->tx_ring);
	if (ptr)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->wq.wait, &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tfile->wq.wait, &wait);

out:
	*err = error;
	return ptr;
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock, void *ptr)
{
	ssize_t ret;
	int err;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (!iov_iter_count(to)) {
		tun_ptr_free(ptr);
		return 0;
	}

	if (!ptr) {
		/* Read frames from ring */
		ptr = tun_ring_recv(tfile, noblock, &err);
		if (!ptr)
			return err;
	}

	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
		xdp_return_frame(xdpf);
	} else {
		struct sk_buff *skb = ptr;

		ret = tun_put_user(tun, tfile, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}

	return ret;
}
static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t len = iov_iter_count(to), ret;

	if (!tun)
		return -EBADFD;

	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;

	tun_put(tun);
	return ret;
}
static void tun_prog_free(struct rcu_head *rcu)
{
	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);

	bpf_prog_destroy(prog->prog);
	kfree(prog);
}
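/* Swap in a new steering/filter program using the usual RCU publish
 * pattern: the new program is made visible with rcu_assign_pointer()
 * under tun->lock, and the old one is destroyed only after a grace
 * period via call_rcu(), so readers under rcu_read_lock() never see a
 * freed program.
 */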
static int __tun_set_ebpf(struct tun_struct *tun,
			  struct tun_prog __rcu **prog_p,
			  struct bpf_prog *prog)
{
	struct tun_prog *old, *new = NULL;

	if (prog) {
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->prog = prog;
	}

	spin_lock_bh(&tun->lock);
	old = rcu_dereference_protected(*prog_p,
					lockdep_is_held(&tun->lock));
	rcu_assign_pointer(*prog_p, new);
	spin_unlock_bh(&tun->lock);

	if (old)
		call_rcu(&old->rcu, tun_prog_free);

	return 0;
}
static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!(list_empty(&tun->disabled)));
	free_percpu(tun->pcpu_stats);
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
}

static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;
	tun_default_link_ksettings(dev, &tun->link_ksettings);

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = tun_free_netdev;
	/* We prefer our own queue length */
	dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
 * devices with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack,
		       "tun/tap creation via rtnetlink is not supported.");
	return -EOPNOTSUPP;
}

static size_t tun_get_size(const struct net_device *dev)
{
	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));

	return nla_total_size(sizeof(uid_t)) +		/* OWNER */
	       nla_total_size(sizeof(gid_t)) +		/* GROUP */
	       nla_total_size(sizeof(u8)) +		/* TYPE */
	       nla_total_size(sizeof(u8)) +		/* PI */
	       nla_total_size(sizeof(u8)) +		/* VNET_HDR */
	       nla_total_size(sizeof(u8)) +		/* PERSIST */
	       nla_total_size(sizeof(u8)) +		/* MULTI_QUEUE */
	       nla_total_size(sizeof(u32)) +		/* NUM_QUEUES */
	       nla_total_size(sizeof(u32)) +		/* NUM_DISABLED_QUEUES */
	       0;
}
static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
		goto nla_put_failure;
	if (uid_valid(tun->owner) &&
	    nla_put_u32(skb, IFLA_TUN_OWNER,
			from_kuid_munged(current_user_ns(), tun->owner)))
		goto nla_put_failure;
	if (gid_valid(tun->group) &&
	    nla_put_u32(skb, IFLA_TUN_GROUP,
			from_kgid_munged(current_user_ns(), tun->group)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
		       !!(tun->flags & IFF_MULTI_QUEUE)))
		goto nla_put_failure;
	if (tun->flags & IFF_MULTI_QUEUE) {
		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
				tun->numdisabled))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
	.get_size	= tun_get_size,
	.fill_info	= tun_fill_info,
};
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}

static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);

	if (!tun)
		return -EBADFD;

	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT,
			   m->msg_flags & MSG_MORE);
	tun_put(tun);
	return ret;
}

static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	void *ptr = m->msg_control;
	int ret;

	if (!tun) {
		ret = -EBADFD;
		goto out_free;
	}

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;
	}
	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free:
	tun_ptr_free(ptr);
	return ret;
}
static int tun_ptr_peek_len(void *ptr)
{
	if (likely(ptr)) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

			return xdpf->len;
		}
		return __skb_array_len_with_tag(ptr);
	} else {
		return 0;
	}
}

static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun;
	int ret = 0;

	tun = tun_get(tfile);
	if (!tun)
		return 0;

	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
	tun_put(tun);

	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};

static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};

static int tun_flags(struct tun_struct *tun)
{
	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}
static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return uid_valid(tun->owner) ?
		sprintf(buf, "%u\n",
			from_kuid_munged(current_user_ns(), tun->owner)) :
		sprintf(buf, "-1\n");
}

static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return gid_valid(tun->group) ?
		sprintf(buf, "%u\n",
			from_kgid_munged(current_user_ns(), tun->group)) :
		sprintf(buf, "-1\n");
}

static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);

static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};

static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};
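/* Illustrative userspace sketch of the TUNSETIFF path handled below
 * (not kernel code; see Documentation/networking/tuntap.txt):
 *
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	struct ifreq ifr = { .ifr_flags = IFF_TUN | IFF_NO_PI };
 *	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);
 *
 * Leaving ifr_name empty lets the kernel pick a "tun%d"/"tap%d" name.
 */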
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!(ifr->ifr_flags & IFF_NAPI) ||
		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
			return -EINVAL;
	}

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
				 ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queues have already been attached, so
			 * there is no need to initialize the device again.
			 */
			netdev_state_change(dev);
			return 0;
		}

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		netdev_state_change(dev);
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;
		err = dev_get_valid_name(net, dev, name);
		if (err < 0)
			goto err_free_dev;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		RCU_INIT_POINTER(tun->steering_prog, NULL);

		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
		if (!tun->pcpu_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_stat;

		tun_net_init(dev);
		tun_flow_init(tun);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features | NETIF_F_LLTX;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
		if (err < 0)
			goto err_free_flow;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;
		/* free_netdev() won't check refcnt; to avoid races with
		 * dev_put() we need to publish tun after registration.
		 */
		rcu_assign_pointer(tfile->tun, tun);
	}

	netif_carrier_on(tun->dev);

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_detach:
	tun_detach_all(dev);
	/* register_netdevice() already called tun_free_netdev() */
	goto err_free_dev;

err_free_flow:
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
err_free_stat:
	free_percpu(tun->pcpu_stats);
err_free_dev:
	free_netdev(dev);
	return err;
}
static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		arg &= ~TUN_F_UFO;
	}

	/* This gives the user a way to test for new features in the future
	 * by trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}
static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}
static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
				 tun->flags & IFF_NAPI_FRAGS, true);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

	if (ret >= 0)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	return ret;
}
static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
			void __user *data)
{
	struct bpf_prog *prog;
	int fd;

	if (copy_from_user(&fd, data, sizeof(fd)))
		return -EFAULT;

	if (fd == -1) {
		prog = NULL;
	} else {
		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	return __tun_set_ebpf(tun, prog_p, prog);
}
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = sock_net(&tfile->sk);
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	unsigned int ifindex;
	int le;
	int ret;
	bool do_notify = false;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
				(unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE) {
		return tun_set_queue(file, &ifr);
	} else if (cmd == SIOCGSKNS) {
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		return open_related_ns(&net->ns, get_net_ns);
	}

	ret = 0;
	rtnl_lock();

	tun = tun_get(tfile);
	if (cmd == TUNSETIFF) {
		ret = -EEXIST;
		if (tun)
			goto unlock;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent it from being unloaded while a persistent
		 * device exists.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
			do_notify = true;
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
			do_notify = true;
		}

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		do_notify = true;
		tun_debug(KERN_INFO, tun, "owner set to %u\n",
			  from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		do_notify = true;
		tun_debug(KERN_INFO, tun, "group set to %u\n",
			  from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			tun_debug(KERN_INFO, tun,
				  "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int) arg;
			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
				  tun->dev->type);
			ret = 0;
		}
		break;

#ifdef TUN_DEBUG
	case TUNSETDEBUG:
		tun->debug = arg;
		break;
#endif
	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
			  ifr.ifr_hwaddr.sa_data);

		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}
		if (sndbuf <= 0) {
			ret = -EINVAL;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNGETVNETLE:
		le = !!(tun->flags & TUN_VNET_LE);
		if (put_user(le, (int __user *)argp))
			ret = -EFAULT;
		break;

	case TUNSETVNETLE:
		if (get_user(le, (int __user *)argp)) {
			ret = -EFAULT;
			break;
		}
		if (le)
			tun->flags |= TUN_VNET_LE;
		else
			tun->flags &= ~TUN_VNET_LE;
		break;

	case TUNGETVNETBE:
		ret = tun_get_vnet_be(tun, argp);
		break;

	case TUNSETVNETBE:
		ret = tun_set_vnet_be(tun, argp);
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
		break;

	case TUNSETFILTEREBPF:
		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (do_notify)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}
static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */
static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}
static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
		sk_free(&tfile->sk);
		return -ENOMEM;
	}

	mutex_init(&tfile->napi_mutex);
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->wq.wait);
	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}
#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif
static const struct file_operations tun_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read_iter	= tun_chr_read_iter,
	.write_iter	= tun_chr_write_iter,
	.poll		= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= tun_chr_compat_ioctl,
#endif
	.open		= tun_chr_open,
	.release	= tun_chr_close,
	.fasync		= tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor		= TUN_MINOR,
	.name		= "tun",
	.nodename	= "net/tun",
	.fops		= &tun_fops,
};
/* ethtool interface */

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed		= SPEED_10;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.phy_address	= 0;
	cmd->base.autoneg	= AUTONEG_DISABLE;
}

static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
	return 0;
}

static int tun_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}
static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}
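/* rx_max_coalesced_frames maps directly onto tun->rx_batched (see
 * tun_rx_batched() above), capped at NAPI_POLL_WEIGHT. Illustrative
 * userspace knob, assuming a device named "tun0":
 *
 *	ethtool -C tun0 rx-frames 32
 */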
static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops tun_ethtool_ops = {
	.get_drvinfo		= tun_get_drvinfo,
	.get_msglevel		= tun_get_msglevel,
	.set_msglevel		= tun_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_coalesce		= tun_get_coalesce,
	.set_coalesce		= tun_set_coalesce,
	.get_link_ksettings	= tun_get_link_ksettings,
	.set_link_ksettings	= tun_set_link_ksettings,
};
static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	case NETDEV_UP:
		for (i = 0; i < tun->numqueues; i++) {
			struct tun_file *tfile;

			tfile = rtnl_dereference(tun->tfiles[i]);
			tfile->socket.sk->sk_write_space(tfile->socket.sk);
		}
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};
static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}
/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
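/* tun_get_socket() and tun_get_tx_ring() exist for in-kernel consumers;
 * notably, vhost-net uses them to push and pull packets on a tun queue
 * without going through the file read/write path.
 */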
struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");