macsec.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * drivers/net/macsec.c - MACsec device
  4. *
  5. * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
  6. */
  7. #include <linux/types.h>
  8. #include <linux/skbuff.h>
  9. #include <linux/socket.h>
  10. #include <linux/module.h>
  11. #include <crypto/aead.h>
  12. #include <linux/etherdevice.h>
  13. #include <linux/rtnetlink.h>
  14. #include <linux/refcount.h>
  15. #include <net/genetlink.h>
  16. #include <net/sock.h>
  17. #include <net/gro_cells.h>
  18. #include <linux/if_arp.h>
  19. #include <uapi/linux/if_macsec.h>
  20. typedef u64 __bitwise sci_t;
  21. #define MACSEC_SCI_LEN 8
  22. /* SecTAG length = macsec_eth_header without the optional SCI */
  23. #define MACSEC_TAG_LEN 6
  24. struct macsec_eth_header {
  25. struct ethhdr eth;
  26. /* SecTAG */
  27. u8 tci_an;
  28. #if defined(__LITTLE_ENDIAN_BITFIELD)
  29. u8 short_length:6,
  30. unused:2;
  31. #elif defined(__BIG_ENDIAN_BITFIELD)
  32. u8 unused:2,
  33. short_length:6;
  34. #else
  35. #error "Please fix <asm/byteorder.h>"
  36. #endif
  37. __be32 packet_number;
  38. u8 secure_channel_id[8]; /* optional */
  39. } __packed;
  40. #define MACSEC_TCI_VERSION 0x80
  41. #define MACSEC_TCI_ES 0x40 /* end station */
  42. #define MACSEC_TCI_SC 0x20 /* SCI present */
  43. #define MACSEC_TCI_SCB 0x10 /* epon */
  44. #define MACSEC_TCI_E 0x08 /* encryption */
  45. #define MACSEC_TCI_C 0x04 /* changed text */
  46. #define MACSEC_AN_MASK 0x03 /* association number */
  47. #define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
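/*
 * Worked example of the SecTAG encoding, using only the definitions above
 * (SCI present, AN 1, encrypted payload):
 *   tci_an            = MACSEC_TCI_SC | MACSEC_TCI_CONFID | 1  (0x2d)
 *   short_length      = 0  (secure data is 48 octets or more)
 *   packet_number     = htonl(pn)
 *   secure_channel_id = 8-octet SCI of the transmitting SecY
 * Without the SCI the SecTAG is MACSEC_TAG_LEN (6) octets, with it
 * MACSEC_TAG_LEN + MACSEC_SCI_LEN (14).
 */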
  48. /* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
  49. #define MIN_NON_SHORT_LEN 48
  50. #define GCM_AES_IV_LEN 12
  51. #define DEFAULT_ICV_LEN 16
  52. #define MACSEC_NUM_AN 4 /* 2 bits for the association number */
  53. #define for_each_rxsc(secy, sc) \
  54. for (sc = rcu_dereference_bh(secy->rx_sc); \
  55. sc; \
  56. sc = rcu_dereference_bh(sc->next))
  57. #define for_each_rxsc_rtnl(secy, sc) \
  58. for (sc = rtnl_dereference(secy->rx_sc); \
  59. sc; \
  60. sc = rtnl_dereference(sc->next))
  61. struct gcm_iv {
  62. union {
  63. u8 secure_channel_id[8];
  64. sci_t sci;
  65. };
  66. __be32 pn;
  67. };
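/*
 * The GCM-AES IV is GCM_AES_IV_LEN (12) octets: the 8-octet SCI followed by
 * the 4-octet packet number, both in network byte order, as assembled by
 * macsec_fill_iv() further down.
 */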
  68. /**
  69. * struct macsec_key - SA key
  70. * @id: user-provided key identifier
  71. * @tfm: crypto struct, key storage
  72. */
  73. struct macsec_key {
  74. u8 id[MACSEC_KEYID_LEN];
  75. struct crypto_aead *tfm;
  76. };
  77. struct macsec_rx_sc_stats {
  78. __u64 InOctetsValidated;
  79. __u64 InOctetsDecrypted;
  80. __u64 InPktsUnchecked;
  81. __u64 InPktsDelayed;
  82. __u64 InPktsOK;
  83. __u64 InPktsInvalid;
  84. __u64 InPktsLate;
  85. __u64 InPktsNotValid;
  86. __u64 InPktsNotUsingSA;
  87. __u64 InPktsUnusedSA;
  88. };
  89. struct macsec_rx_sa_stats {
  90. __u32 InPktsOK;
  91. __u32 InPktsInvalid;
  92. __u32 InPktsNotValid;
  93. __u32 InPktsNotUsingSA;
  94. __u32 InPktsUnusedSA;
  95. };
  96. struct macsec_tx_sa_stats {
  97. __u32 OutPktsProtected;
  98. __u32 OutPktsEncrypted;
  99. };
  100. struct macsec_tx_sc_stats {
  101. __u64 OutPktsProtected;
  102. __u64 OutPktsEncrypted;
  103. __u64 OutOctetsProtected;
  104. __u64 OutOctetsEncrypted;
  105. };
  106. struct macsec_dev_stats {
  107. __u64 OutPktsUntagged;
  108. __u64 InPktsUntagged;
  109. __u64 OutPktsTooLong;
  110. __u64 InPktsNoTag;
  111. __u64 InPktsBadTag;
  112. __u64 InPktsUnknownSCI;
  113. __u64 InPktsNoSCI;
  114. __u64 InPktsOverrun;
  115. };
  116. /**
  117. * struct macsec_rx_sa - receive secure association
118. * @active: SA is in use
  119. * @next_pn: packet number expected for the next packet
  120. * @lock: protects next_pn manipulations
  121. * @key: key structure
  122. * @stats: per-SA stats
  123. */
  124. struct macsec_rx_sa {
  125. struct macsec_key key;
  126. spinlock_t lock;
  127. u32 next_pn;
  128. refcount_t refcnt;
  129. bool active;
  130. struct macsec_rx_sa_stats __percpu *stats;
  131. struct macsec_rx_sc *sc;
  132. struct rcu_head rcu;
  133. };
  134. struct pcpu_rx_sc_stats {
  135. struct macsec_rx_sc_stats stats;
  136. struct u64_stats_sync syncp;
  137. };
  138. /**
  139. * struct macsec_rx_sc - receive secure channel
  140. * @sci: secure channel identifier for this SC
  141. * @active: channel is active
  142. * @sa: array of secure associations
  143. * @stats: per-SC stats
  144. */
  145. struct macsec_rx_sc {
  146. struct macsec_rx_sc __rcu *next;
  147. sci_t sci;
  148. bool active;
  149. struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
  150. struct pcpu_rx_sc_stats __percpu *stats;
  151. refcount_t refcnt;
  152. struct rcu_head rcu_head;
  153. };
  154. /**
  155. * struct macsec_tx_sa - transmit secure association
156. * @active: SA is in use
  157. * @next_pn: packet number to use for the next packet
  158. * @lock: protects next_pn manipulations
  159. * @key: key structure
  160. * @stats: per-SA stats
  161. */
  162. struct macsec_tx_sa {
  163. struct macsec_key key;
  164. spinlock_t lock;
  165. u32 next_pn;
  166. refcount_t refcnt;
  167. bool active;
  168. struct macsec_tx_sa_stats __percpu *stats;
  169. struct rcu_head rcu;
  170. };
  171. struct pcpu_tx_sc_stats {
  172. struct macsec_tx_sc_stats stats;
  173. struct u64_stats_sync syncp;
  174. };
  175. /**
  176. * struct macsec_tx_sc - transmit secure channel
177. * @active: transmit SC is in use
  178. * @encoding_sa: association number of the SA currently in use
  179. * @encrypt: encrypt packets on transmit, or authenticate only
  180. * @send_sci: always include the SCI in the SecTAG
181. * @end_station: set the ES bit in the SecTAG
  182. * @scb: single copy broadcast flag
  183. * @sa: array of secure associations
  184. * @stats: stats for this TXSC
  185. */
  186. struct macsec_tx_sc {
  187. bool active;
  188. u8 encoding_sa;
  189. bool encrypt;
  190. bool send_sci;
  191. bool end_station;
  192. bool scb;
  193. struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
  194. struct pcpu_tx_sc_stats __percpu *stats;
  195. };
  196. #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
  197. /**
  198. * struct macsec_secy - MACsec Security Entity
  199. * @netdev: netdevice for this SecY
  200. * @n_rx_sc: number of receive secure channels configured on this SecY
  201. * @sci: secure channel identifier used for tx
  202. * @key_len: length of keys used by the cipher suite
  203. * @icv_len: length of ICV used by the cipher suite
  204. * @validate_frames: validation mode
  205. * @operational: MAC_Operational flag
  206. * @protect_frames: enable protection for this SecY
  207. * @replay_protect: enable packet number checks on receive
  208. * @replay_window: size of the replay window
  209. * @tx_sc: transmit secure channel
  210. * @rx_sc: linked list of receive secure channels
  211. */
  212. struct macsec_secy {
  213. struct net_device *netdev;
  214. unsigned int n_rx_sc;
  215. sci_t sci;
  216. u16 key_len;
  217. u16 icv_len;
  218. enum macsec_validation_type validate_frames;
  219. bool operational;
  220. bool protect_frames;
  221. bool replay_protect;
  222. u32 replay_window;
  223. struct macsec_tx_sc tx_sc;
  224. struct macsec_rx_sc __rcu *rx_sc;
  225. };
  226. struct pcpu_secy_stats {
  227. struct macsec_dev_stats stats;
  228. struct u64_stats_sync syncp;
  229. };
  230. /**
  231. * struct macsec_dev - private data
  232. * @secy: SecY config
  233. * @real_dev: pointer to underlying netdevice
  234. * @stats: MACsec device stats
  235. * @secys: linked list of SecY's on the underlying device
  236. */
  237. struct macsec_dev {
  238. struct macsec_secy secy;
  239. struct net_device *real_dev;
  240. struct pcpu_secy_stats __percpu *stats;
  241. struct list_head secys;
  242. struct gro_cells gro_cells;
  243. };
  244. /**
  245. * struct macsec_rxh_data - rx_handler private argument
  246. * @secys: linked list of SecY's on this underlying device
  247. */
  248. struct macsec_rxh_data {
  249. struct list_head secys;
  250. };
  251. static struct macsec_dev *macsec_priv(const struct net_device *dev)
  252. {
  253. return (struct macsec_dev *)netdev_priv(dev);
  254. }
  255. static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
  256. {
  257. return rcu_dereference_bh(dev->rx_handler_data);
  258. }
  259. static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
  260. {
  261. return rtnl_dereference(dev->rx_handler_data);
  262. }
  263. struct macsec_cb {
  264. struct aead_request *req;
  265. union {
  266. struct macsec_tx_sa *tx_sa;
  267. struct macsec_rx_sa *rx_sa;
  268. };
  269. u8 assoc_num;
  270. bool valid;
  271. bool has_sci;
  272. };
  273. static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
  274. {
  275. struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
  276. if (!sa || !sa->active)
  277. return NULL;
  278. if (!refcount_inc_not_zero(&sa->refcnt))
  279. return NULL;
  280. return sa;
  281. }
  282. static void free_rx_sc_rcu(struct rcu_head *head)
  283. {
  284. struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
  285. free_percpu(rx_sc->stats);
  286. kfree(rx_sc);
  287. }
  288. static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
  289. {
  290. return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
  291. }
  292. static void macsec_rxsc_put(struct macsec_rx_sc *sc)
  293. {
  294. if (refcount_dec_and_test(&sc->refcnt))
  295. call_rcu(&sc->rcu_head, free_rx_sc_rcu);
  296. }
  297. static void free_rxsa(struct rcu_head *head)
  298. {
  299. struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
  300. crypto_free_aead(sa->key.tfm);
  301. free_percpu(sa->stats);
  302. kfree(sa);
  303. }
  304. static void macsec_rxsa_put(struct macsec_rx_sa *sa)
  305. {
  306. if (refcount_dec_and_test(&sa->refcnt))
  307. call_rcu(&sa->rcu, free_rxsa);
  308. }
  309. static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
  310. {
  311. struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
  312. if (!sa || !sa->active)
  313. return NULL;
  314. if (!refcount_inc_not_zero(&sa->refcnt))
  315. return NULL;
  316. return sa;
  317. }
  318. static void free_txsa(struct rcu_head *head)
  319. {
  320. struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
  321. crypto_free_aead(sa->key.tfm);
  322. free_percpu(sa->stats);
  323. kfree(sa);
  324. }
  325. static void macsec_txsa_put(struct macsec_tx_sa *sa)
  326. {
  327. if (refcount_dec_and_test(&sa->refcnt))
  328. call_rcu(&sa->rcu, free_txsa);
  329. }
  330. static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
  331. {
  332. BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
  333. return (struct macsec_cb *)skb->cb;
  334. }
  335. #define MACSEC_PORT_ES (htons(0x0001))
  336. #define MACSEC_PORT_SCB (0x0000)
  337. #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
  338. #define MACSEC_GCM_AES_128_SAK_LEN 16
  339. #define MACSEC_GCM_AES_256_SAK_LEN 32
  340. #define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
  341. #define DEFAULT_SEND_SCI true
  342. #define DEFAULT_ENCRYPT false
  343. #define DEFAULT_ENCODING_SA 0
  344. static bool send_sci(const struct macsec_secy *secy)
  345. {
  346. const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
  347. return tx_sc->send_sci ||
  348. (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
  349. }
  350. static sci_t make_sci(u8 *addr, __be16 port)
  351. {
  352. sci_t sci;
  353. memcpy(&sci, addr, ETH_ALEN);
  354. memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
  355. return sci;
  356. }
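/*
 * Example: for source MAC 52:54:00:12:34:56 and MACSEC_PORT_ES (port 1),
 * the resulting SCI is 52-54-00-12-34-56-00-01, i.e. the station address
 * followed by the 16-bit port number in network byte order.
 */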
  357. static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
  358. {
  359. sci_t sci;
  360. if (sci_present)
  361. memcpy(&sci, hdr->secure_channel_id,
  362. sizeof(hdr->secure_channel_id));
  363. else
  364. sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
  365. return sci;
  366. }
  367. static unsigned int macsec_sectag_len(bool sci_present)
  368. {
  369. return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
  370. }
  371. static unsigned int macsec_hdr_len(bool sci_present)
  372. {
  373. return macsec_sectag_len(sci_present) + ETH_HLEN;
  374. }
  375. static unsigned int macsec_extra_len(bool sci_present)
  376. {
  377. return macsec_sectag_len(sci_present) + sizeof(__be16);
  378. }
  379. /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
  380. static void macsec_fill_sectag(struct macsec_eth_header *h,
  381. const struct macsec_secy *secy, u32 pn,
  382. bool sci_present)
  383. {
  384. const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
  385. memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
  386. h->eth.h_proto = htons(ETH_P_MACSEC);
  387. if (sci_present) {
  388. h->tci_an |= MACSEC_TCI_SC;
  389. memcpy(&h->secure_channel_id, &secy->sci,
  390. sizeof(h->secure_channel_id));
  391. } else {
  392. if (tx_sc->end_station)
  393. h->tci_an |= MACSEC_TCI_ES;
  394. if (tx_sc->scb)
  395. h->tci_an |= MACSEC_TCI_SCB;
  396. }
  397. h->packet_number = htonl(pn);
  398. /* with GCM, C/E clear for !encrypt, both set for encrypt */
  399. if (tx_sc->encrypt)
  400. h->tci_an |= MACSEC_TCI_CONFID;
  401. else if (secy->icv_len != DEFAULT_ICV_LEN)
  402. h->tci_an |= MACSEC_TCI_C;
  403. h->tci_an |= tx_sc->encoding_sa;
  404. }
  405. static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
  406. {
  407. if (data_len < MIN_NON_SHORT_LEN)
  408. h->short_length = data_len;
  409. }
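/*
 * short_length is only meaningful for secure data shorter than
 * MIN_NON_SHORT_LEN: e.g. 30 octets of user data gives short_length = 30,
 * while 48 octets or more leaves it at 0 (cleared by macsec_fill_sectag()).
 */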
  410. /* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
  411. static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
  412. {
  413. struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
  414. int len = skb->len - 2 * ETH_ALEN;
  415. int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
  416. /* a) It comprises at least 17 octets */
  417. if (skb->len <= 16)
  418. return false;
  419. /* b) MACsec EtherType: already checked */
  420. /* c) V bit is clear */
  421. if (h->tci_an & MACSEC_TCI_VERSION)
  422. return false;
  423. /* d) ES or SCB => !SC */
  424. if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
  425. (h->tci_an & MACSEC_TCI_SC))
  426. return false;
  427. /* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
  428. if (h->unused)
  429. return false;
  430. /* rx.pn != 0 (figure 10-5) */
  431. if (!h->packet_number)
  432. return false;
  433. /* length check, f) g) h) i) */
  434. if (h->short_length)
  435. return len == extra_len + h->short_length;
  436. return len >= extra_len + MIN_NON_SHORT_LEN;
  437. }
  438. #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
  439. #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
  440. static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
  441. {
  442. struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
  443. gcm_iv->sci = sci;
  444. gcm_iv->pn = htonl(pn);
  445. }
  446. static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
  447. {
  448. return (struct macsec_eth_header *)skb_mac_header(skb);
  449. }
  450. static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
  451. {
  452. u32 pn;
  453. spin_lock_bh(&tx_sa->lock);
  454. pn = tx_sa->next_pn;
  455. tx_sa->next_pn++;
  456. if (tx_sa->next_pn == 0) {
  457. pr_debug("PN wrapped, transitioning to !oper\n");
  458. tx_sa->active = false;
  459. if (secy->protect_frames)
  460. secy->operational = false;
  461. }
  462. spin_unlock_bh(&tx_sa->lock);
  463. return pn;
  464. }
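/*
 * The PN is drawn before the frame is built; if the 32-bit counter wraps
 * to zero the SA is deactivated and, when protect_frames is set, the SecY
 * is marked non-operational, preventing further use of an exhausted PN
 * space until a fresh SA is configured.
 */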
  465. static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
  466. {
  467. struct macsec_dev *macsec = netdev_priv(dev);
  468. skb->dev = macsec->real_dev;
  469. skb_reset_mac_header(skb);
  470. skb->protocol = eth_hdr(skb)->h_proto;
  471. }
  472. static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
  473. struct macsec_tx_sa *tx_sa)
  474. {
  475. struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
  476. u64_stats_update_begin(&txsc_stats->syncp);
  477. if (tx_sc->encrypt) {
  478. txsc_stats->stats.OutOctetsEncrypted += skb->len;
  479. txsc_stats->stats.OutPktsEncrypted++;
  480. this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
  481. } else {
  482. txsc_stats->stats.OutOctetsProtected += skb->len;
  483. txsc_stats->stats.OutPktsProtected++;
  484. this_cpu_inc(tx_sa->stats->OutPktsProtected);
  485. }
  486. u64_stats_update_end(&txsc_stats->syncp);
  487. }
  488. static void count_tx(struct net_device *dev, int ret, int len)
  489. {
  490. if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
  491. struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
  492. u64_stats_update_begin(&stats->syncp);
  493. stats->tx_packets++;
  494. stats->tx_bytes += len;
  495. u64_stats_update_end(&stats->syncp);
  496. }
  497. }
  498. static void macsec_encrypt_done(struct crypto_async_request *base, int err)
  499. {
  500. struct sk_buff *skb = base->data;
  501. struct net_device *dev = skb->dev;
  502. struct macsec_dev *macsec = macsec_priv(dev);
  503. struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
  504. int len, ret;
  505. aead_request_free(macsec_skb_cb(skb)->req);
  506. rcu_read_lock_bh();
  507. macsec_encrypt_finish(skb, dev);
  508. macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
  509. len = skb->len;
  510. ret = dev_queue_xmit(skb);
  511. count_tx(dev, ret, len);
  512. rcu_read_unlock_bh();
  513. macsec_txsa_put(sa);
  514. dev_put(dev);
  515. }
  516. static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
  517. unsigned char **iv,
  518. struct scatterlist **sg,
  519. int num_frags)
  520. {
  521. size_t size, iv_offset, sg_offset;
  522. struct aead_request *req;
  523. void *tmp;
  524. size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
  525. iv_offset = size;
  526. size += GCM_AES_IV_LEN;
  527. size = ALIGN(size, __alignof__(struct scatterlist));
  528. sg_offset = size;
  529. size += sizeof(struct scatterlist) * num_frags;
  530. tmp = kmalloc(size, GFP_ATOMIC);
  531. if (!tmp)
  532. return NULL;
  533. *iv = (unsigned char *)(tmp + iv_offset);
  534. *sg = (struct scatterlist *)(tmp + sg_offset);
  535. req = tmp;
  536. aead_request_set_tfm(req, tfm);
  537. return req;
  538. }
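/*
 * A single GFP_ATOMIC allocation holds the aead_request (including the
 * transform's request context), the GCM_AES_IV_LEN IV and num_frags
 * scatterlist entries; sg_offset is rounded up so the scatterlist starts
 * on a properly aligned boundary.
 */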
  539. static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
  540. struct net_device *dev)
  541. {
  542. int ret;
  543. struct scatterlist *sg;
  544. struct sk_buff *trailer;
  545. unsigned char *iv;
  546. struct ethhdr *eth;
  547. struct macsec_eth_header *hh;
  548. size_t unprotected_len;
  549. struct aead_request *req;
  550. struct macsec_secy *secy;
  551. struct macsec_tx_sc *tx_sc;
  552. struct macsec_tx_sa *tx_sa;
  553. struct macsec_dev *macsec = macsec_priv(dev);
  554. bool sci_present;
  555. u32 pn;
  556. secy = &macsec->secy;
  557. tx_sc = &secy->tx_sc;
  558. /* 10.5.1 TX SA assignment */
  559. tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
  560. if (!tx_sa) {
  561. secy->operational = false;
  562. kfree_skb(skb);
  563. return ERR_PTR(-EINVAL);
  564. }
  565. if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
  566. skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
  567. struct sk_buff *nskb = skb_copy_expand(skb,
  568. MACSEC_NEEDED_HEADROOM,
  569. MACSEC_NEEDED_TAILROOM,
  570. GFP_ATOMIC);
  571. if (likely(nskb)) {
  572. consume_skb(skb);
  573. skb = nskb;
  574. } else {
  575. macsec_txsa_put(tx_sa);
  576. kfree_skb(skb);
  577. return ERR_PTR(-ENOMEM);
  578. }
  579. } else {
  580. skb = skb_unshare(skb, GFP_ATOMIC);
  581. if (!skb) {
  582. macsec_txsa_put(tx_sa);
  583. return ERR_PTR(-ENOMEM);
  584. }
  585. }
  586. unprotected_len = skb->len;
  587. eth = eth_hdr(skb);
  588. sci_present = send_sci(secy);
  589. hh = skb_push(skb, macsec_extra_len(sci_present));
  590. memmove(hh, eth, 2 * ETH_ALEN);
  591. pn = tx_sa_update_pn(tx_sa, secy);
  592. if (pn == 0) {
  593. macsec_txsa_put(tx_sa);
  594. kfree_skb(skb);
  595. return ERR_PTR(-ENOLINK);
  596. }
  597. macsec_fill_sectag(hh, secy, pn, sci_present);
  598. macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
  599. skb_put(skb, secy->icv_len);
  600. if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
  601. struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
  602. u64_stats_update_begin(&secy_stats->syncp);
  603. secy_stats->stats.OutPktsTooLong++;
  604. u64_stats_update_end(&secy_stats->syncp);
  605. macsec_txsa_put(tx_sa);
  606. kfree_skb(skb);
  607. return ERR_PTR(-EINVAL);
  608. }
  609. ret = skb_cow_data(skb, 0, &trailer);
  610. if (unlikely(ret < 0)) {
  611. macsec_txsa_put(tx_sa);
  612. kfree_skb(skb);
  613. return ERR_PTR(ret);
  614. }
  615. req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
  616. if (!req) {
  617. macsec_txsa_put(tx_sa);
  618. kfree_skb(skb);
  619. return ERR_PTR(-ENOMEM);
  620. }
  621. macsec_fill_iv(iv, secy->sci, pn);
  622. sg_init_table(sg, ret);
  623. ret = skb_to_sgvec(skb, sg, 0, skb->len);
  624. if (unlikely(ret < 0)) {
  625. aead_request_free(req);
  626. macsec_txsa_put(tx_sa);
  627. kfree_skb(skb);
  628. return ERR_PTR(ret);
  629. }
  630. if (tx_sc->encrypt) {
  631. int len = skb->len - macsec_hdr_len(sci_present) -
  632. secy->icv_len;
  633. aead_request_set_crypt(req, sg, sg, len, iv);
  634. aead_request_set_ad(req, macsec_hdr_len(sci_present));
  635. } else {
  636. aead_request_set_crypt(req, sg, sg, 0, iv);
  637. aead_request_set_ad(req, skb->len - secy->icv_len);
  638. }
  639. macsec_skb_cb(skb)->req = req;
  640. macsec_skb_cb(skb)->tx_sa = tx_sa;
  641. aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
  642. dev_hold(skb->dev);
  643. ret = crypto_aead_encrypt(req);
  644. if (ret == -EINPROGRESS) {
  645. return ERR_PTR(ret);
  646. } else if (ret != 0) {
  647. dev_put(skb->dev);
  648. kfree_skb(skb);
  649. aead_request_free(req);
  650. macsec_txsa_put(tx_sa);
  651. return ERR_PTR(-EINVAL);
  652. }
  653. dev_put(skb->dev);
  654. aead_request_free(req);
  655. macsec_txsa_put(tx_sa);
  656. return skb;
  657. }
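/*
 * AEAD layout used above: with tx_sc->encrypt, the ethernet + SecTAG header
 * (macsec_hdr_len()) is the associated data and the payload is encrypted in
 * place; in integrity-only mode the whole frame is associated data, the
 * crypt length is 0 and only the ICV is written into the tailroom reserved
 * by skb_put(skb, secy->icv_len).
 */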
  658. static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
  659. {
  660. struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
  661. struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
  662. struct macsec_eth_header *hdr = macsec_ethhdr(skb);
  663. u32 lowest_pn = 0;
  664. spin_lock(&rx_sa->lock);
  665. if (rx_sa->next_pn >= secy->replay_window)
  666. lowest_pn = rx_sa->next_pn - secy->replay_window;
  667. /* Now perform replay protection check again
  668. * (see IEEE 802.1AE-2006 figure 10-5)
  669. */
  670. if (secy->replay_protect && pn < lowest_pn) {
  671. spin_unlock(&rx_sa->lock);
  672. u64_stats_update_begin(&rxsc_stats->syncp);
  673. rxsc_stats->stats.InPktsLate++;
  674. u64_stats_update_end(&rxsc_stats->syncp);
  675. return false;
  676. }
  677. if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
  678. u64_stats_update_begin(&rxsc_stats->syncp);
  679. if (hdr->tci_an & MACSEC_TCI_E)
  680. rxsc_stats->stats.InOctetsDecrypted += skb->len;
  681. else
  682. rxsc_stats->stats.InOctetsValidated += skb->len;
  683. u64_stats_update_end(&rxsc_stats->syncp);
  684. }
  685. if (!macsec_skb_cb(skb)->valid) {
  686. spin_unlock(&rx_sa->lock);
  687. /* 10.6.5 */
  688. if (hdr->tci_an & MACSEC_TCI_C ||
  689. secy->validate_frames == MACSEC_VALIDATE_STRICT) {
  690. u64_stats_update_begin(&rxsc_stats->syncp);
  691. rxsc_stats->stats.InPktsNotValid++;
  692. u64_stats_update_end(&rxsc_stats->syncp);
  693. return false;
  694. }
  695. u64_stats_update_begin(&rxsc_stats->syncp);
  696. if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
  697. rxsc_stats->stats.InPktsInvalid++;
  698. this_cpu_inc(rx_sa->stats->InPktsInvalid);
  699. } else if (pn < lowest_pn) {
  700. rxsc_stats->stats.InPktsDelayed++;
  701. } else {
  702. rxsc_stats->stats.InPktsUnchecked++;
  703. }
  704. u64_stats_update_end(&rxsc_stats->syncp);
  705. } else {
  706. u64_stats_update_begin(&rxsc_stats->syncp);
  707. if (pn < lowest_pn) {
  708. rxsc_stats->stats.InPktsDelayed++;
  709. } else {
  710. rxsc_stats->stats.InPktsOK++;
  711. this_cpu_inc(rx_sa->stats->InPktsOK);
  712. }
  713. u64_stats_update_end(&rxsc_stats->syncp);
  714. if (pn >= rx_sa->next_pn)
  715. rx_sa->next_pn = pn + 1;
  716. spin_unlock(&rx_sa->lock);
  717. }
  718. return true;
  719. }
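/*
 * Replay handling above follows IEEE 802.1AE-2006 figure 10-5: with replay
 * protection enabled, any PN below next_pn - replay_window is counted as
 * InPktsLate and dropped; a valid frame with PN >= next_pn advances
 * next_pn to PN + 1.
 */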
  720. static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
  721. {
  722. skb->pkt_type = PACKET_HOST;
  723. skb->protocol = eth_type_trans(skb, dev);
  724. skb_reset_network_header(skb);
  725. if (!skb_transport_header_was_set(skb))
  726. skb_reset_transport_header(skb);
  727. skb_reset_mac_len(skb);
  728. }
  729. static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
  730. {
  731. skb->ip_summed = CHECKSUM_NONE;
  732. memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
  733. skb_pull(skb, hdr_len);
  734. pskb_trim_unique(skb, skb->len - icv_len);
  735. }
  736. static void count_rx(struct net_device *dev, int len)
  737. {
  738. struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
  739. u64_stats_update_begin(&stats->syncp);
  740. stats->rx_packets++;
  741. stats->rx_bytes += len;
  742. u64_stats_update_end(&stats->syncp);
  743. }
  744. static void macsec_decrypt_done(struct crypto_async_request *base, int err)
  745. {
  746. struct sk_buff *skb = base->data;
  747. struct net_device *dev = skb->dev;
  748. struct macsec_dev *macsec = macsec_priv(dev);
  749. struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
  750. struct macsec_rx_sc *rx_sc = rx_sa->sc;
  751. int len;
  752. u32 pn;
  753. aead_request_free(macsec_skb_cb(skb)->req);
  754. if (!err)
  755. macsec_skb_cb(skb)->valid = true;
  756. rcu_read_lock_bh();
  757. pn = ntohl(macsec_ethhdr(skb)->packet_number);
  758. if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
  759. rcu_read_unlock_bh();
  760. kfree_skb(skb);
  761. goto out;
  762. }
  763. macsec_finalize_skb(skb, macsec->secy.icv_len,
  764. macsec_extra_len(macsec_skb_cb(skb)->has_sci));
  765. macsec_reset_skb(skb, macsec->secy.netdev);
  766. len = skb->len;
  767. if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
  768. count_rx(dev, len);
  769. rcu_read_unlock_bh();
  770. out:
  771. macsec_rxsa_put(rx_sa);
  772. macsec_rxsc_put(rx_sc);
  773. dev_put(dev);
  774. }
  775. static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
  776. struct net_device *dev,
  777. struct macsec_rx_sa *rx_sa,
  778. sci_t sci,
  779. struct macsec_secy *secy)
  780. {
  781. int ret;
  782. struct scatterlist *sg;
  783. struct sk_buff *trailer;
  784. unsigned char *iv;
  785. struct aead_request *req;
  786. struct macsec_eth_header *hdr;
  787. u16 icv_len = secy->icv_len;
  788. macsec_skb_cb(skb)->valid = false;
  789. skb = skb_share_check(skb, GFP_ATOMIC);
  790. if (!skb)
  791. return ERR_PTR(-ENOMEM);
  792. ret = skb_cow_data(skb, 0, &trailer);
  793. if (unlikely(ret < 0)) {
  794. kfree_skb(skb);
  795. return ERR_PTR(ret);
  796. }
  797. req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
  798. if (!req) {
  799. kfree_skb(skb);
  800. return ERR_PTR(-ENOMEM);
  801. }
  802. hdr = (struct macsec_eth_header *)skb->data;
  803. macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
  804. sg_init_table(sg, ret);
  805. ret = skb_to_sgvec(skb, sg, 0, skb->len);
  806. if (unlikely(ret < 0)) {
  807. aead_request_free(req);
  808. kfree_skb(skb);
  809. return ERR_PTR(ret);
  810. }
  811. if (hdr->tci_an & MACSEC_TCI_E) {
  812. /* confidentiality: ethernet + macsec header
  813. * authenticated, encrypted payload
  814. */
  815. int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
  816. aead_request_set_crypt(req, sg, sg, len, iv);
  817. aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
  818. skb = skb_unshare(skb, GFP_ATOMIC);
  819. if (!skb) {
  820. aead_request_free(req);
  821. return ERR_PTR(-ENOMEM);
  822. }
  823. } else {
  824. /* integrity only: all headers + data authenticated */
  825. aead_request_set_crypt(req, sg, sg, icv_len, iv);
  826. aead_request_set_ad(req, skb->len - icv_len);
  827. }
  828. macsec_skb_cb(skb)->req = req;
  829. skb->dev = dev;
  830. aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
  831. dev_hold(dev);
  832. ret = crypto_aead_decrypt(req);
  833. if (ret == -EINPROGRESS) {
  834. return ERR_PTR(ret);
  835. } else if (ret != 0) {
  836. /* decryption/authentication failed
  837. * 10.6 if validateFrames is disabled, deliver anyway
  838. */
  839. if (ret != -EBADMSG) {
  840. kfree_skb(skb);
  841. skb = ERR_PTR(ret);
  842. }
  843. } else {
  844. macsec_skb_cb(skb)->valid = true;
  845. }
  846. dev_put(dev);
  847. aead_request_free(req);
  848. return skb;
  849. }
  850. static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
  851. {
  852. struct macsec_rx_sc *rx_sc;
  853. for_each_rxsc(secy, rx_sc) {
  854. if (rx_sc->sci == sci)
  855. return rx_sc;
  856. }
  857. return NULL;
  858. }
  859. static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
  860. {
  861. struct macsec_rx_sc *rx_sc;
  862. for_each_rxsc_rtnl(secy, rx_sc) {
  863. if (rx_sc->sci == sci)
  864. return rx_sc;
  865. }
  866. return NULL;
  867. }
  868. static void handle_not_macsec(struct sk_buff *skb)
  869. {
  870. struct macsec_rxh_data *rxd;
  871. struct macsec_dev *macsec;
  872. rcu_read_lock();
  873. rxd = macsec_data_rcu(skb->dev);
  874. /* 10.6 If the management control validateFrames is not
  875. * Strict, frames without a SecTAG are received, counted, and
  876. * delivered to the Controlled Port
  877. */
  878. list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
  879. struct sk_buff *nskb;
  880. struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
  881. if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
  882. u64_stats_update_begin(&secy_stats->syncp);
  883. secy_stats->stats.InPktsNoTag++;
  884. u64_stats_update_end(&secy_stats->syncp);
  885. continue;
  886. }
  887. /* deliver on this port */
  888. nskb = skb_clone(skb, GFP_ATOMIC);
  889. if (!nskb)
  890. break;
  891. nskb->dev = macsec->secy.netdev;
  892. if (netif_rx(nskb) == NET_RX_SUCCESS) {
  893. u64_stats_update_begin(&secy_stats->syncp);
  894. secy_stats->stats.InPktsUntagged++;
  895. u64_stats_update_end(&secy_stats->syncp);
  896. }
  897. }
  898. rcu_read_unlock();
  899. }
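/*
 * Untagged frames are cloned to every SecY on the port whose
 * validate_frames policy is not strict (counted as InPktsUntagged); strict
 * SecYs only count them as InPktsNoTag. The caller then still lets the
 * original frame continue up the uncontrolled port.
 */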
  900. static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
  901. {
  902. struct sk_buff *skb = *pskb;
  903. struct net_device *dev = skb->dev;
  904. struct macsec_eth_header *hdr;
  905. struct macsec_secy *secy = NULL;
  906. struct macsec_rx_sc *rx_sc;
  907. struct macsec_rx_sa *rx_sa;
  908. struct macsec_rxh_data *rxd;
  909. struct macsec_dev *macsec;
  910. unsigned int len;
  911. sci_t sci;
  912. u32 pn;
  913. bool cbit;
  914. struct pcpu_rx_sc_stats *rxsc_stats;
  915. struct pcpu_secy_stats *secy_stats;
  916. bool pulled_sci;
  917. int ret;
  918. if (skb_headroom(skb) < ETH_HLEN)
  919. goto drop_direct;
  920. hdr = macsec_ethhdr(skb);
  921. if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
  922. handle_not_macsec(skb);
  923. /* and deliver to the uncontrolled port */
  924. return RX_HANDLER_PASS;
  925. }
  926. skb = skb_unshare(skb, GFP_ATOMIC);
  927. *pskb = skb;
  928. if (!skb)
  929. return RX_HANDLER_CONSUMED;
  930. pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
  931. if (!pulled_sci) {
  932. if (!pskb_may_pull(skb, macsec_extra_len(false)))
  933. goto drop_direct;
  934. }
  935. hdr = macsec_ethhdr(skb);
  936. /* Frames with a SecTAG that has the TCI E bit set but the C
  937. * bit clear are discarded, as this reserved encoding is used
  938. * to identify frames with a SecTAG that are not to be
  939. * delivered to the Controlled Port.
  940. */
  941. if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
  942. return RX_HANDLER_PASS;
  943. /* now, pull the extra length */
  944. if (hdr->tci_an & MACSEC_TCI_SC) {
  945. if (!pulled_sci)
  946. goto drop_direct;
  947. }
  948. /* ethernet header is part of crypto processing */
  949. skb_push(skb, ETH_HLEN);
  950. macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
  951. macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
  952. sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
  953. rcu_read_lock();
  954. rxd = macsec_data_rcu(skb->dev);
  955. list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
  956. struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
  957. sc = sc ? macsec_rxsc_get(sc) : NULL;
  958. if (sc) {
  959. secy = &macsec->secy;
  960. rx_sc = sc;
  961. break;
  962. }
  963. }
  964. if (!secy)
  965. goto nosci;
  966. dev = secy->netdev;
  967. macsec = macsec_priv(dev);
  968. secy_stats = this_cpu_ptr(macsec->stats);
  969. rxsc_stats = this_cpu_ptr(rx_sc->stats);
  970. if (!macsec_validate_skb(skb, secy->icv_len)) {
  971. u64_stats_update_begin(&secy_stats->syncp);
  972. secy_stats->stats.InPktsBadTag++;
  973. u64_stats_update_end(&secy_stats->syncp);
  974. goto drop_nosa;
  975. }
  976. rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
  977. if (!rx_sa) {
  978. /* 10.6.1 if the SA is not in use */
  979. /* If validateFrames is Strict or the C bit in the
  980. * SecTAG is set, discard
  981. */
  982. if (hdr->tci_an & MACSEC_TCI_C ||
  983. secy->validate_frames == MACSEC_VALIDATE_STRICT) {
  984. u64_stats_update_begin(&rxsc_stats->syncp);
  985. rxsc_stats->stats.InPktsNotUsingSA++;
  986. u64_stats_update_end(&rxsc_stats->syncp);
  987. goto drop_nosa;
  988. }
  989. /* not Strict, the frame (with the SecTAG and ICV
  990. * removed) is delivered to the Controlled Port.
  991. */
  992. u64_stats_update_begin(&rxsc_stats->syncp);
  993. rxsc_stats->stats.InPktsUnusedSA++;
  994. u64_stats_update_end(&rxsc_stats->syncp);
  995. goto deliver;
  996. }
  997. /* First, PN check to avoid decrypting obviously wrong packets */
  998. pn = ntohl(hdr->packet_number);
  999. if (secy->replay_protect) {
  1000. bool late;
  1001. spin_lock(&rx_sa->lock);
  1002. late = rx_sa->next_pn >= secy->replay_window &&
  1003. pn < (rx_sa->next_pn - secy->replay_window);
  1004. spin_unlock(&rx_sa->lock);
  1005. if (late) {
  1006. u64_stats_update_begin(&rxsc_stats->syncp);
  1007. rxsc_stats->stats.InPktsLate++;
  1008. u64_stats_update_end(&rxsc_stats->syncp);
  1009. goto drop;
  1010. }
  1011. }
  1012. macsec_skb_cb(skb)->rx_sa = rx_sa;
  1013. /* Disabled && !changed text => skip validation */
  1014. if (hdr->tci_an & MACSEC_TCI_C ||
  1015. secy->validate_frames != MACSEC_VALIDATE_DISABLED)
  1016. skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
  1017. if (IS_ERR(skb)) {
  1018. /* the decrypt callback needs the reference */
  1019. if (PTR_ERR(skb) != -EINPROGRESS) {
  1020. macsec_rxsa_put(rx_sa);
  1021. macsec_rxsc_put(rx_sc);
  1022. }
  1023. rcu_read_unlock();
  1024. *pskb = NULL;
  1025. return RX_HANDLER_CONSUMED;
  1026. }
  1027. if (!macsec_post_decrypt(skb, secy, pn))
  1028. goto drop;
  1029. deliver:
  1030. macsec_finalize_skb(skb, secy->icv_len,
  1031. macsec_extra_len(macsec_skb_cb(skb)->has_sci));
  1032. macsec_reset_skb(skb, secy->netdev);
  1033. if (rx_sa)
  1034. macsec_rxsa_put(rx_sa);
  1035. macsec_rxsc_put(rx_sc);
  1036. skb_orphan(skb);
  1037. len = skb->len;
  1038. ret = gro_cells_receive(&macsec->gro_cells, skb);
  1039. if (ret == NET_RX_SUCCESS)
  1040. count_rx(dev, len);
  1041. else
  1042. macsec->secy.netdev->stats.rx_dropped++;
  1043. rcu_read_unlock();
  1044. *pskb = NULL;
  1045. return RX_HANDLER_CONSUMED;
  1046. drop:
  1047. macsec_rxsa_put(rx_sa);
  1048. drop_nosa:
  1049. macsec_rxsc_put(rx_sc);
  1050. rcu_read_unlock();
  1051. drop_direct:
  1052. kfree_skb(skb);
  1053. *pskb = NULL;
  1054. return RX_HANDLER_CONSUMED;
  1055. nosci:
  1056. /* 10.6.1 if the SC is not found */
  1057. cbit = !!(hdr->tci_an & MACSEC_TCI_C);
  1058. if (!cbit)
  1059. macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
  1060. macsec_extra_len(macsec_skb_cb(skb)->has_sci));
  1061. list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
  1062. struct sk_buff *nskb;
  1063. secy_stats = this_cpu_ptr(macsec->stats);
  1064. /* If validateFrames is Strict or the C bit in the
  1065. * SecTAG is set, discard
  1066. */
  1067. if (cbit ||
  1068. macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
  1069. u64_stats_update_begin(&secy_stats->syncp);
  1070. secy_stats->stats.InPktsNoSCI++;
  1071. u64_stats_update_end(&secy_stats->syncp);
  1072. continue;
  1073. }
  1074. /* not strict, the frame (with the SecTAG and ICV
  1075. * removed) is delivered to the Controlled Port.
  1076. */
  1077. nskb = skb_clone(skb, GFP_ATOMIC);
  1078. if (!nskb)
  1079. break;
  1080. macsec_reset_skb(nskb, macsec->secy.netdev);
  1081. ret = netif_rx(nskb);
  1082. if (ret == NET_RX_SUCCESS) {
  1083. u64_stats_update_begin(&secy_stats->syncp);
  1084. secy_stats->stats.InPktsUnknownSCI++;
  1085. u64_stats_update_end(&secy_stats->syncp);
  1086. } else {
  1087. macsec->secy.netdev->stats.rx_dropped++;
  1088. }
  1089. }
  1090. rcu_read_unlock();
  1091. *pskb = skb;
  1092. return RX_HANDLER_PASS;
  1093. }
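/*
 * Receive path summary: the SCI from the SecTAG (or derived from the source
 * address when no SCI is present) selects the SecY and RX SC, the AN selects
 * the SA, a quick PN window check rejects obviously late frames, and
 * macsec_decrypt()/macsec_post_decrypt() perform validation before the frame
 * is handed to GRO on the MACsec netdevice.
 */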
  1094. static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
  1095. {
  1096. struct crypto_aead *tfm;
  1097. int ret;
  1098. /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
  1099. tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
  1100. if (IS_ERR(tfm))
  1101. return tfm;
  1102. ret = crypto_aead_setkey(tfm, key, key_len);
  1103. if (ret < 0)
  1104. goto fail;
  1105. ret = crypto_aead_setauthsize(tfm, icv_len);
  1106. if (ret < 0)
  1107. goto fail;
  1108. return tfm;
  1109. fail:
  1110. crypto_free_aead(tfm);
  1111. return ERR_PTR(ret);
  1112. }
  1113. static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
  1114. int icv_len)
  1115. {
  1116. rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
  1117. if (!rx_sa->stats)
  1118. return -ENOMEM;
  1119. rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
  1120. if (IS_ERR(rx_sa->key.tfm)) {
  1121. free_percpu(rx_sa->stats);
  1122. return PTR_ERR(rx_sa->key.tfm);
  1123. }
  1124. rx_sa->active = false;
  1125. rx_sa->next_pn = 1;
  1126. refcount_set(&rx_sa->refcnt, 1);
  1127. spin_lock_init(&rx_sa->lock);
  1128. return 0;
  1129. }
  1130. static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
  1131. {
  1132. rx_sa->active = false;
  1133. macsec_rxsa_put(rx_sa);
  1134. }
  1135. static void free_rx_sc(struct macsec_rx_sc *rx_sc)
  1136. {
  1137. int i;
  1138. for (i = 0; i < MACSEC_NUM_AN; i++) {
  1139. struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
  1140. RCU_INIT_POINTER(rx_sc->sa[i], NULL);
  1141. if (sa)
  1142. clear_rx_sa(sa);
  1143. }
  1144. macsec_rxsc_put(rx_sc);
  1145. }
  1146. static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
  1147. {
  1148. struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
  1149. for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
  1150. rx_sc;
  1151. rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
  1152. if (rx_sc->sci == sci) {
  1153. if (rx_sc->active)
  1154. secy->n_rx_sc--;
  1155. rcu_assign_pointer(*rx_scp, rx_sc->next);
  1156. return rx_sc;
  1157. }
  1158. }
  1159. return NULL;
  1160. }
  1161. static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
  1162. {
  1163. struct macsec_rx_sc *rx_sc;
  1164. struct macsec_dev *macsec;
  1165. struct net_device *real_dev = macsec_priv(dev)->real_dev;
  1166. struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
  1167. struct macsec_secy *secy;
  1168. list_for_each_entry(macsec, &rxd->secys, secys) {
  1169. if (find_rx_sc_rtnl(&macsec->secy, sci))
  1170. return ERR_PTR(-EEXIST);
  1171. }
  1172. rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
  1173. if (!rx_sc)
  1174. return ERR_PTR(-ENOMEM);
  1175. rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
  1176. if (!rx_sc->stats) {
  1177. kfree(rx_sc);
  1178. return ERR_PTR(-ENOMEM);
  1179. }
  1180. rx_sc->sci = sci;
  1181. rx_sc->active = true;
  1182. refcount_set(&rx_sc->refcnt, 1);
  1183. secy = &macsec_priv(dev)->secy;
  1184. rcu_assign_pointer(rx_sc->next, secy->rx_sc);
  1185. rcu_assign_pointer(secy->rx_sc, rx_sc);
  1186. if (rx_sc->active)
  1187. secy->n_rx_sc++;
  1188. return rx_sc;
  1189. }
  1190. static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
  1191. int icv_len)
  1192. {
  1193. tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
  1194. if (!tx_sa->stats)
  1195. return -ENOMEM;
  1196. tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
  1197. if (IS_ERR(tx_sa->key.tfm)) {
  1198. free_percpu(tx_sa->stats);
  1199. return PTR_ERR(tx_sa->key.tfm);
  1200. }
  1201. tx_sa->active = false;
  1202. refcount_set(&tx_sa->refcnt, 1);
  1203. spin_lock_init(&tx_sa->lock);
  1204. return 0;
  1205. }
  1206. static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
  1207. {
  1208. tx_sa->active = false;
  1209. macsec_txsa_put(tx_sa);
  1210. }
  1211. static struct genl_family macsec_fam;
  1212. static struct net_device *get_dev_from_nl(struct net *net,
  1213. struct nlattr **attrs)
  1214. {
  1215. int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
  1216. struct net_device *dev;
  1217. dev = __dev_get_by_index(net, ifindex);
  1218. if (!dev)
  1219. return ERR_PTR(-ENODEV);
  1220. if (!netif_is_macsec(dev))
  1221. return ERR_PTR(-ENODEV);
  1222. return dev;
  1223. }
  1224. static sci_t nla_get_sci(const struct nlattr *nla)
  1225. {
  1226. return (__force sci_t)nla_get_u64(nla);
  1227. }
  1228. static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
  1229. int padattr)
  1230. {
  1231. return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
  1232. }
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;

	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}
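
/* Attribute policies for the top-level, RXSC and SA nests. */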
static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
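
/* MACSEC_CMD_ADD_RXSA: install a new receive SA on an existing RX SC. */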
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}
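
/* MACSEC_CMD_ADD_RXSC: create a new receive SC identified by its SCI. */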
static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
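
/* MACSEC_CMD_ADD_TXSA: install a new transmit SA under the SecY's TX SC. */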
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}
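
/* Deletion handlers. A still-active SA cannot be removed (-EBUSY). */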
  1545. static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
  1546. {
  1547. struct nlattr **attrs = info->attrs;
  1548. struct net_device *dev;
  1549. struct macsec_secy *secy;
  1550. struct macsec_rx_sc *rx_sc;
  1551. struct macsec_rx_sa *rx_sa;
  1552. u8 assoc_num;
  1553. struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
  1554. struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
  1555. if (!attrs[MACSEC_ATTR_IFINDEX])
  1556. return -EINVAL;
  1557. if (parse_sa_config(attrs, tb_sa))
  1558. return -EINVAL;
  1559. if (parse_rxsc_config(attrs, tb_rxsc))
  1560. return -EINVAL;
  1561. rtnl_lock();
  1562. rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
  1563. &dev, &secy, &rx_sc, &assoc_num);
  1564. if (IS_ERR(rx_sa)) {
  1565. rtnl_unlock();
  1566. return PTR_ERR(rx_sa);
  1567. }
  1568. if (rx_sa->active) {
  1569. rtnl_unlock();
  1570. return -EBUSY;
  1571. }
  1572. RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
  1573. clear_rx_sa(rx_sa);
  1574. rtnl_unlock();
  1575. return 0;
  1576. }
  1577. static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
  1578. {
  1579. struct nlattr **attrs = info->attrs;
  1580. struct net_device *dev;
  1581. struct macsec_secy *secy;
  1582. struct macsec_rx_sc *rx_sc;
  1583. sci_t sci;
  1584. struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
  1585. if (!attrs[MACSEC_ATTR_IFINDEX])
  1586. return -EINVAL;
  1587. if (parse_rxsc_config(attrs, tb_rxsc))
  1588. return -EINVAL;
  1589. if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
  1590. return -EINVAL;
  1591. rtnl_lock();
  1592. dev = get_dev_from_nl(genl_info_net(info), info->attrs);
  1593. if (IS_ERR(dev)) {
  1594. rtnl_unlock();
  1595. return PTR_ERR(dev);
  1596. }
  1597. secy = &macsec_priv(dev)->secy;
  1598. sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
  1599. rx_sc = del_rx_sc(secy, sci);
  1600. if (!rx_sc) {
  1601. rtnl_unlock();
  1602. return -ENODEV;
  1603. }
  1604. free_rx_sc(rx_sc);
  1605. rtnl_unlock();
  1606. return 0;
  1607. }
  1608. static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
  1609. {
  1610. struct nlattr **attrs = info->attrs;
  1611. struct net_device *dev;
  1612. struct macsec_secy *secy;
  1613. struct macsec_tx_sc *tx_sc;
  1614. struct macsec_tx_sa *tx_sa;
  1615. u8 assoc_num;
  1616. struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
  1617. if (!attrs[MACSEC_ATTR_IFINDEX])
  1618. return -EINVAL;
  1619. if (parse_sa_config(attrs, tb_sa))
  1620. return -EINVAL;
  1621. rtnl_lock();
  1622. tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
  1623. &dev, &secy, &tx_sc, &assoc_num);
  1624. if (IS_ERR(tx_sa)) {
  1625. rtnl_unlock();
  1626. return PTR_ERR(tx_sa);
  1627. }
  1628. if (tx_sa->active) {
  1629. rtnl_unlock();
  1630. return -EBUSY;
  1631. }
  1632. RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
  1633. clear_tx_sa(tx_sa);
  1634. rtnl_unlock();
  1635. return 0;
  1636. }
  1637. static bool validate_upd_sa(struct nlattr **attrs)
  1638. {
  1639. if (!attrs[MACSEC_SA_ATTR_AN] ||
  1640. attrs[MACSEC_SA_ATTR_KEY] ||
  1641. attrs[MACSEC_SA_ATTR_KEYID])
  1642. return false;
  1643. if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
  1644. return false;
  1645. if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
  1646. return false;
  1647. if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
  1648. if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
  1649. return false;
  1650. }
  1651. return true;
  1652. }
  1653. static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
  1654. {
  1655. struct nlattr **attrs = info->attrs;
  1656. struct net_device *dev;
  1657. struct macsec_secy *secy;
  1658. struct macsec_tx_sc *tx_sc;
  1659. struct macsec_tx_sa *tx_sa;
  1660. u8 assoc_num;
  1661. struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
  1662. if (!attrs[MACSEC_ATTR_IFINDEX])
  1663. return -EINVAL;
  1664. if (parse_sa_config(attrs, tb_sa))
  1665. return -EINVAL;
  1666. if (!validate_upd_sa(tb_sa))
  1667. return -EINVAL;
  1668. rtnl_lock();
  1669. tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
  1670. &dev, &secy, &tx_sc, &assoc_num);
  1671. if (IS_ERR(tx_sa)) {
  1672. rtnl_unlock();
  1673. return PTR_ERR(tx_sa);
  1674. }
  1675. if (tb_sa[MACSEC_SA_ATTR_PN]) {
  1676. spin_lock_bh(&tx_sa->lock);
  1677. tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
  1678. spin_unlock_bh(&tx_sa->lock);
  1679. }
  1680. if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
  1681. tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
  1682. if (assoc_num == tx_sc->encoding_sa)
  1683. secy->operational = tx_sa->active;
  1684. rtnl_unlock();
  1685. return 0;
  1686. }
  1687. static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
  1688. {
  1689. struct nlattr **attrs = info->attrs;
  1690. struct net_device *dev;
  1691. struct macsec_secy *secy;
  1692. struct macsec_rx_sc *rx_sc;
  1693. struct macsec_rx_sa *rx_sa;
  1694. u8 assoc_num;
  1695. struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
  1696. struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
  1697. if (!attrs[MACSEC_ATTR_IFINDEX])
  1698. return -EINVAL;
  1699. if (parse_rxsc_config(attrs, tb_rxsc))
  1700. return -EINVAL;
  1701. if (parse_sa_config(attrs, tb_sa))
  1702. return -EINVAL;
  1703. if (!validate_upd_sa(tb_sa))
  1704. return -EINVAL;
  1705. rtnl_lock();
  1706. rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
  1707. &dev, &secy, &rx_sc, &assoc_num);
  1708. if (IS_ERR(rx_sa)) {
  1709. rtnl_unlock();
  1710. return PTR_ERR(rx_sa);
  1711. }
  1712. if (tb_sa[MACSEC_SA_ATTR_PN]) {
  1713. spin_lock_bh(&rx_sa->lock);
  1714. rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
  1715. spin_unlock_bh(&rx_sa->lock);
  1716. }
  1717. if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
  1718. rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
  1719. rtnl_unlock();
  1720. return 0;
  1721. }
  1722. static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
  1723. {
  1724. struct nlattr **attrs = info->attrs;
  1725. struct net_device *dev;
  1726. struct macsec_secy *secy;
  1727. struct macsec_rx_sc *rx_sc;
  1728. struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
  1729. if (!attrs[MACSEC_ATTR_IFINDEX])
  1730. return -EINVAL;
  1731. if (parse_rxsc_config(attrs, tb_rxsc))
  1732. return -EINVAL;
  1733. if (!validate_add_rxsc(tb_rxsc))
  1734. return -EINVAL;
  1735. rtnl_lock();
  1736. rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
  1737. if (IS_ERR(rx_sc)) {
  1738. rtnl_unlock();
  1739. return PTR_ERR(rx_sc);
  1740. }
  1741. if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
  1742. bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
  1743. if (rx_sc->active != new)
  1744. secy->n_rx_sc += new ? 1 : -1;
  1745. rx_sc->active = new;
  1746. }
  1747. rtnl_unlock();
  1748. return 0;
  1749. }
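
/* Fold the per-CPU counters into a single sum and emit them as netlink
 * attributes for the dump path.
 */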
  1750. static int copy_tx_sa_stats(struct sk_buff *skb,
  1751. struct macsec_tx_sa_stats __percpu *pstats)
  1752. {
  1753. struct macsec_tx_sa_stats sum = {0, };
  1754. int cpu;
  1755. for_each_possible_cpu(cpu) {
  1756. const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
  1757. sum.OutPktsProtected += stats->OutPktsProtected;
  1758. sum.OutPktsEncrypted += stats->OutPktsEncrypted;
  1759. }
  1760. if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
  1761. nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
  1762. return -EMSGSIZE;
  1763. return 0;
  1764. }
  1765. static noinline_for_stack int
  1766. copy_rx_sa_stats(struct sk_buff *skb,
  1767. struct macsec_rx_sa_stats __percpu *pstats)
  1768. {
  1769. struct macsec_rx_sa_stats sum = {0, };
  1770. int cpu;
  1771. for_each_possible_cpu(cpu) {
  1772. const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
  1773. sum.InPktsOK += stats->InPktsOK;
  1774. sum.InPktsInvalid += stats->InPktsInvalid;
  1775. sum.InPktsNotValid += stats->InPktsNotValid;
  1776. sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
  1777. sum.InPktsUnusedSA += stats->InPktsUnusedSA;
  1778. }
  1779. if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
  1780. nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
  1781. nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
  1782. nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
  1783. nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
  1784. return -EMSGSIZE;
  1785. return 0;
  1786. }
  1787. static noinline_for_stack int
  1788. copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
  1789. {
  1790. struct macsec_rx_sc_stats sum = {0, };
  1791. int cpu;
  1792. for_each_possible_cpu(cpu) {
  1793. const struct pcpu_rx_sc_stats *stats;
  1794. struct macsec_rx_sc_stats tmp;
  1795. unsigned int start;
  1796. stats = per_cpu_ptr(pstats, cpu);
  1797. do {
  1798. start = u64_stats_fetch_begin_irq(&stats->syncp);
  1799. memcpy(&tmp, &stats->stats, sizeof(tmp));
  1800. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  1801. sum.InOctetsValidated += tmp.InOctetsValidated;
  1802. sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
  1803. sum.InPktsUnchecked += tmp.InPktsUnchecked;
  1804. sum.InPktsDelayed += tmp.InPktsDelayed;
  1805. sum.InPktsOK += tmp.InPktsOK;
  1806. sum.InPktsInvalid += tmp.InPktsInvalid;
  1807. sum.InPktsLate += tmp.InPktsLate;
  1808. sum.InPktsNotValid += tmp.InPktsNotValid;
  1809. sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
  1810. sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
  1811. }
  1812. if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
  1813. sum.InOctetsValidated,
  1814. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1815. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
  1816. sum.InOctetsDecrypted,
  1817. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1818. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
  1819. sum.InPktsUnchecked,
  1820. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1821. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
  1822. sum.InPktsDelayed,
  1823. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1824. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
  1825. sum.InPktsOK,
  1826. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1827. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
  1828. sum.InPktsInvalid,
  1829. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1830. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
  1831. sum.InPktsLate,
  1832. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1833. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
  1834. sum.InPktsNotValid,
  1835. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1836. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
  1837. sum.InPktsNotUsingSA,
  1838. MACSEC_RXSC_STATS_ATTR_PAD) ||
  1839. nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
  1840. sum.InPktsUnusedSA,
  1841. MACSEC_RXSC_STATS_ATTR_PAD))
  1842. return -EMSGSIZE;
  1843. return 0;
  1844. }
  1845. static noinline_for_stack int
  1846. copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
  1847. {
  1848. struct macsec_tx_sc_stats sum = {0, };
  1849. int cpu;
  1850. for_each_possible_cpu(cpu) {
  1851. const struct pcpu_tx_sc_stats *stats;
  1852. struct macsec_tx_sc_stats tmp;
  1853. unsigned int start;
  1854. stats = per_cpu_ptr(pstats, cpu);
  1855. do {
  1856. start = u64_stats_fetch_begin_irq(&stats->syncp);
  1857. memcpy(&tmp, &stats->stats, sizeof(tmp));
  1858. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  1859. sum.OutPktsProtected += tmp.OutPktsProtected;
  1860. sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
  1861. sum.OutOctetsProtected += tmp.OutOctetsProtected;
  1862. sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
  1863. }
  1864. if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
  1865. sum.OutPktsProtected,
  1866. MACSEC_TXSC_STATS_ATTR_PAD) ||
  1867. nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
  1868. sum.OutPktsEncrypted,
  1869. MACSEC_TXSC_STATS_ATTR_PAD) ||
  1870. nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
  1871. sum.OutOctetsProtected,
  1872. MACSEC_TXSC_STATS_ATTR_PAD) ||
  1873. nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
  1874. sum.OutOctetsEncrypted,
  1875. MACSEC_TXSC_STATS_ATTR_PAD))
  1876. return -EMSGSIZE;
  1877. return 0;
  1878. }
  1879. static noinline_for_stack int
  1880. copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
  1881. {
  1882. struct macsec_dev_stats sum = {0, };
  1883. int cpu;
  1884. for_each_possible_cpu(cpu) {
  1885. const struct pcpu_secy_stats *stats;
  1886. struct macsec_dev_stats tmp;
  1887. unsigned int start;
  1888. stats = per_cpu_ptr(pstats, cpu);
  1889. do {
  1890. start = u64_stats_fetch_begin_irq(&stats->syncp);
  1891. memcpy(&tmp, &stats->stats, sizeof(tmp));
  1892. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  1893. sum.OutPktsUntagged += tmp.OutPktsUntagged;
  1894. sum.InPktsUntagged += tmp.InPktsUntagged;
  1895. sum.OutPktsTooLong += tmp.OutPktsTooLong;
  1896. sum.InPktsNoTag += tmp.InPktsNoTag;
  1897. sum.InPktsBadTag += tmp.InPktsBadTag;
  1898. sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
  1899. sum.InPktsNoSCI += tmp.InPktsNoSCI;
  1900. sum.InPktsOverrun += tmp.InPktsOverrun;
  1901. }
  1902. if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
  1903. sum.OutPktsUntagged,
  1904. MACSEC_SECY_STATS_ATTR_PAD) ||
  1905. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
  1906. sum.InPktsUntagged,
  1907. MACSEC_SECY_STATS_ATTR_PAD) ||
  1908. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
  1909. sum.OutPktsTooLong,
  1910. MACSEC_SECY_STATS_ATTR_PAD) ||
  1911. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
  1912. sum.InPktsNoTag,
  1913. MACSEC_SECY_STATS_ATTR_PAD) ||
  1914. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
  1915. sum.InPktsBadTag,
  1916. MACSEC_SECY_STATS_ATTR_PAD) ||
  1917. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
  1918. sum.InPktsUnknownSCI,
  1919. MACSEC_SECY_STATS_ATTR_PAD) ||
  1920. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
  1921. sum.InPktsNoSCI,
  1922. MACSEC_SECY_STATS_ATTR_PAD) ||
  1923. nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
  1924. sum.InPktsOverrun,
  1925. MACSEC_SECY_STATS_ATTR_PAD))
  1926. return -EMSGSIZE;
  1927. return 0;
  1928. }
  1929. static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
  1930. {
  1931. struct macsec_tx_sc *tx_sc = &secy->tx_sc;
  1932. struct nlattr *secy_nest = nla_nest_start_noflag(skb,
  1933. MACSEC_ATTR_SECY);
  1934. u64 csid;
  1935. if (!secy_nest)
  1936. return 1;
  1937. switch (secy->key_len) {
  1938. case MACSEC_GCM_AES_128_SAK_LEN:
  1939. csid = MACSEC_DEFAULT_CIPHER_ID;
  1940. break;
  1941. case MACSEC_GCM_AES_256_SAK_LEN:
  1942. csid = MACSEC_CIPHER_ID_GCM_AES_256;
  1943. break;
  1944. default:
  1945. goto cancel;
  1946. }
  1947. if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
  1948. MACSEC_SECY_ATTR_PAD) ||
  1949. nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
  1950. csid, MACSEC_SECY_ATTR_PAD) ||
  1951. nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
  1952. nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
  1953. nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
  1954. nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
  1955. nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
  1956. nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
  1957. nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
  1958. nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
  1959. nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
  1960. nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
  1961. goto cancel;
  1962. if (secy->replay_protect) {
  1963. if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
  1964. goto cancel;
  1965. }
  1966. nla_nest_end(skb, secy_nest);
  1967. return 0;
  1968. cancel:
  1969. nla_nest_cancel(skb, secy_nest);
  1970. return 1;
  1971. }
  1972. static noinline_for_stack int
  1973. dump_secy(struct macsec_secy *secy, struct net_device *dev,
  1974. struct sk_buff *skb, struct netlink_callback *cb)
  1975. {
  1976. struct macsec_rx_sc *rx_sc;
  1977. struct macsec_tx_sc *tx_sc = &secy->tx_sc;
  1978. struct nlattr *txsa_list, *rxsc_list;
  1979. int i, j;
  1980. void *hdr;
  1981. struct nlattr *attr;
  1982. hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
  1983. &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
  1984. if (!hdr)
  1985. return -EMSGSIZE;
  1986. genl_dump_check_consistent(cb, hdr);
  1987. if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
  1988. goto nla_put_failure;
  1989. if (nla_put_secy(secy, skb))
  1990. goto nla_put_failure;
  1991. attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
  1992. if (!attr)
  1993. goto nla_put_failure;
  1994. if (copy_tx_sc_stats(skb, tx_sc->stats)) {
  1995. nla_nest_cancel(skb, attr);
  1996. goto nla_put_failure;
  1997. }
  1998. nla_nest_end(skb, attr);
  1999. attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
  2000. if (!attr)
  2001. goto nla_put_failure;
  2002. if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
  2003. nla_nest_cancel(skb, attr);
  2004. goto nla_put_failure;
  2005. }
  2006. nla_nest_end(skb, attr);
  2007. txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
  2008. if (!txsa_list)
  2009. goto nla_put_failure;
  2010. for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
  2011. struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
  2012. struct nlattr *txsa_nest;
  2013. if (!tx_sa)
  2014. continue;
  2015. txsa_nest = nla_nest_start_noflag(skb, j++);
  2016. if (!txsa_nest) {
  2017. nla_nest_cancel(skb, txsa_list);
  2018. goto nla_put_failure;
  2019. }
  2020. if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
  2021. nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
  2022. nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
  2023. nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
  2024. nla_nest_cancel(skb, txsa_nest);
  2025. nla_nest_cancel(skb, txsa_list);
  2026. goto nla_put_failure;
  2027. }
  2028. attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
  2029. if (!attr) {
  2030. nla_nest_cancel(skb, txsa_nest);
  2031. nla_nest_cancel(skb, txsa_list);
  2032. goto nla_put_failure;
  2033. }
  2034. if (copy_tx_sa_stats(skb, tx_sa->stats)) {
  2035. nla_nest_cancel(skb, attr);
  2036. nla_nest_cancel(skb, txsa_nest);
  2037. nla_nest_cancel(skb, txsa_list);
  2038. goto nla_put_failure;
  2039. }
  2040. nla_nest_end(skb, attr);
  2041. nla_nest_end(skb, txsa_nest);
  2042. }
  2043. nla_nest_end(skb, txsa_list);
  2044. rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
  2045. if (!rxsc_list)
  2046. goto nla_put_failure;
  2047. j = 1;
  2048. for_each_rxsc_rtnl(secy, rx_sc) {
  2049. int k;
  2050. struct nlattr *rxsa_list;
  2051. struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
  2052. if (!rxsc_nest) {
  2053. nla_nest_cancel(skb, rxsc_list);
  2054. goto nla_put_failure;
  2055. }
  2056. if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
  2057. nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
  2058. MACSEC_RXSC_ATTR_PAD)) {
  2059. nla_nest_cancel(skb, rxsc_nest);
  2060. nla_nest_cancel(skb, rxsc_list);
  2061. goto nla_put_failure;
  2062. }
  2063. attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
  2064. if (!attr) {
  2065. nla_nest_cancel(skb, rxsc_nest);
  2066. nla_nest_cancel(skb, rxsc_list);
  2067. goto nla_put_failure;
  2068. }
  2069. if (copy_rx_sc_stats(skb, rx_sc->stats)) {
  2070. nla_nest_cancel(skb, attr);
  2071. nla_nest_cancel(skb, rxsc_nest);
  2072. nla_nest_cancel(skb, rxsc_list);
  2073. goto nla_put_failure;
  2074. }
  2075. nla_nest_end(skb, attr);
  2076. rxsa_list = nla_nest_start_noflag(skb,
  2077. MACSEC_RXSC_ATTR_SA_LIST);
  2078. if (!rxsa_list) {
  2079. nla_nest_cancel(skb, rxsc_nest);
  2080. nla_nest_cancel(skb, rxsc_list);
  2081. goto nla_put_failure;
  2082. }
  2083. for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
  2084. struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
  2085. struct nlattr *rxsa_nest;
  2086. if (!rx_sa)
  2087. continue;
  2088. rxsa_nest = nla_nest_start_noflag(skb, k++);
  2089. if (!rxsa_nest) {
  2090. nla_nest_cancel(skb, rxsa_list);
  2091. nla_nest_cancel(skb, rxsc_nest);
  2092. nla_nest_cancel(skb, rxsc_list);
  2093. goto nla_put_failure;
  2094. }
  2095. attr = nla_nest_start_noflag(skb,
  2096. MACSEC_SA_ATTR_STATS);
  2097. if (!attr) {
  2098. nla_nest_cancel(skb, rxsa_list);
  2099. nla_nest_cancel(skb, rxsc_nest);
  2100. nla_nest_cancel(skb, rxsc_list);
  2101. goto nla_put_failure;
  2102. }
  2103. if (copy_rx_sa_stats(skb, rx_sa->stats)) {
  2104. nla_nest_cancel(skb, attr);
  2105. nla_nest_cancel(skb, rxsa_list);
  2106. nla_nest_cancel(skb, rxsc_nest);
  2107. nla_nest_cancel(skb, rxsc_list);
  2108. goto nla_put_failure;
  2109. }
  2110. nla_nest_end(skb, attr);
  2111. if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
  2112. nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
  2113. nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
  2114. nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
  2115. nla_nest_cancel(skb, rxsa_nest);
  2116. nla_nest_cancel(skb, rxsc_nest);
  2117. nla_nest_cancel(skb, rxsc_list);
  2118. goto nla_put_failure;
  2119. }
  2120. nla_nest_end(skb, rxsa_nest);
  2121. }
  2122. nla_nest_end(skb, rxsa_list);
  2123. nla_nest_end(skb, rxsc_nest);
  2124. }
  2125. nla_nest_end(skb, rxsc_list);
  2126. genlmsg_end(skb, hdr);
  2127. return 0;
  2128. nla_put_failure:
  2129. genlmsg_cancel(skb, hdr);
  2130. return -EMSGSIZE;
  2131. }
  2132. static int macsec_generation = 1; /* protected by RTNL */
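
/* Walk all MACsec devices in the namespace; cb->args[0] remembers how many
 * netdevs have already been dumped.
 */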
  2133. static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
  2134. {
  2135. struct net *net = sock_net(skb->sk);
  2136. struct net_device *dev;
  2137. int dev_idx, d;
  2138. dev_idx = cb->args[0];
  2139. d = 0;
  2140. rtnl_lock();
  2141. cb->seq = macsec_generation;
  2142. for_each_netdev(net, dev) {
  2143. struct macsec_secy *secy;
  2144. if (d < dev_idx)
  2145. goto next;
  2146. if (!netif_is_macsec(dev))
  2147. goto next;
  2148. secy = &macsec_priv(dev)->secy;
  2149. if (dump_secy(secy, dev, skb, cb) < 0)
  2150. goto done;
  2151. next:
  2152. d++;
  2153. }
  2154. done:
  2155. rtnl_unlock();
  2156. cb->args[0] = d;
  2157. return skb->len;
  2158. }
  2159. static const struct genl_ops macsec_genl_ops[] = {
  2160. {
  2161. .cmd = MACSEC_CMD_GET_TXSC,
  2162. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  2163. .dumpit = macsec_dump_txsc,
  2164. },
  2165. {
  2166. .cmd = MACSEC_CMD_ADD_RXSC,
  2167. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  2168. .doit = macsec_add_rxsc,
  2169. .flags = GENL_ADMIN_PERM,
  2170. },
  2171. {
  2172. .cmd = MACSEC_CMD_DEL_RXSC,
  2173. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  2174. .doit = macsec_del_rxsc,
  2175. .flags = GENL_ADMIN_PERM,
  2176. },
  2177. {
  2178. .cmd = MACSEC_CMD_UPD_RXSC,
  2179. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  2180. .doit = macsec_upd_rxsc,
  2181. .flags = GENL_ADMIN_PERM,
  2182. },
  2183. {
  2184. .cmd = MACSEC_CMD_ADD_TXSA,
  2185. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  2186. .doit = macsec_add_txsa,
  2187. .flags = GENL_ADMIN_PERM,
  2188. },
  2189. {
  2190. .cmd = MACSEC_CMD_DEL_TXSA,
  2191. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  2192. .doit = macsec_del_txsa,
  2193. .flags = GENL_ADMIN_PERM,
  2194. },
  2195. {
  2196. .cmd = MACSEC_CMD_UPD_TXSA,
  2197. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  2198. .doit = macsec_upd_txsa,
  2199. .flags = GENL_ADMIN_PERM,
  2200. },
  2201. {
  2202. .cmd = MACSEC_CMD_ADD_RXSA,
  2203. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  2204. .doit = macsec_add_rxsa,
  2205. .flags = GENL_ADMIN_PERM,
  2206. },
  2207. {
  2208. .cmd = MACSEC_CMD_DEL_RXSA,
  2209. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  2210. .doit = macsec_del_rxsa,
  2211. .flags = GENL_ADMIN_PERM,
  2212. },
  2213. {
  2214. .cmd = MACSEC_CMD_UPD_RXSA,
  2215. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  2216. .doit = macsec_upd_rxsa,
  2217. .flags = GENL_ADMIN_PERM,
  2218. },
  2219. };
  2220. static struct genl_family macsec_fam __ro_after_init = {
  2221. .name = MACSEC_GENL_NAME,
  2222. .hdrsize = 0,
  2223. .version = MACSEC_GENL_VERSION,
  2224. .maxattr = MACSEC_ATTR_MAX,
  2225. .policy = macsec_genl_policy,
  2226. .netnsok = true,
  2227. .module = THIS_MODULE,
  2228. .ops = macsec_genl_ops,
  2229. .n_ops = ARRAY_SIZE(macsec_genl_ops),
  2230. };
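
/* Transmit path: pass frames through untagged when protection is disabled,
 * otherwise hand them to macsec_encrypt() before queueing them on the real
 * device.
 */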
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);

	return ret;
}
  2268. #define MACSEC_FEATURES \
  2269. (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
  2270. static int macsec_dev_init(struct net_device *dev)
  2271. {
  2272. struct macsec_dev *macsec = macsec_priv(dev);
  2273. struct net_device *real_dev = macsec->real_dev;
  2274. int err;
  2275. dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
  2276. if (!dev->tstats)
  2277. return -ENOMEM;
  2278. err = gro_cells_init(&macsec->gro_cells, dev);
  2279. if (err) {
  2280. free_percpu(dev->tstats);
  2281. return err;
  2282. }
  2283. dev->features = real_dev->features & MACSEC_FEATURES;
  2284. dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
  2285. dev->needed_headroom = real_dev->needed_headroom +
  2286. MACSEC_NEEDED_HEADROOM;
  2287. dev->needed_tailroom = real_dev->needed_tailroom +
  2288. MACSEC_NEEDED_TAILROOM;
  2289. if (is_zero_ether_addr(dev->dev_addr))
  2290. eth_hw_addr_inherit(dev, real_dev);
  2291. if (is_zero_ether_addr(dev->broadcast))
  2292. memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
  2293. return 0;
  2294. }
  2295. static void macsec_dev_uninit(struct net_device *dev)
  2296. {
  2297. struct macsec_dev *macsec = macsec_priv(dev);
  2298. gro_cells_destroy(&macsec->gro_cells);
  2299. free_percpu(dev->tstats);
  2300. }
  2301. static netdev_features_t macsec_fix_features(struct net_device *dev,
  2302. netdev_features_t features)
  2303. {
  2304. struct macsec_dev *macsec = macsec_priv(dev);
  2305. struct net_device *real_dev = macsec->real_dev;
  2306. features &= (real_dev->features & MACSEC_FEATURES) |
  2307. NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
  2308. features |= NETIF_F_LLTX;
  2309. return features;
  2310. }
  2311. static int macsec_dev_open(struct net_device *dev)
  2312. {
  2313. struct macsec_dev *macsec = macsec_priv(dev);
  2314. struct net_device *real_dev = macsec->real_dev;
  2315. int err;
  2316. err = dev_uc_add(real_dev, dev->dev_addr);
  2317. if (err < 0)
  2318. return err;
  2319. if (dev->flags & IFF_ALLMULTI) {
  2320. err = dev_set_allmulti(real_dev, 1);
  2321. if (err < 0)
  2322. goto del_unicast;
  2323. }
  2324. if (dev->flags & IFF_PROMISC) {
  2325. err = dev_set_promiscuity(real_dev, 1);
  2326. if (err < 0)
  2327. goto clear_allmulti;
  2328. }
  2329. if (netif_carrier_ok(real_dev))
  2330. netif_carrier_on(dev);
  2331. return 0;
  2332. clear_allmulti:
  2333. if (dev->flags & IFF_ALLMULTI)
  2334. dev_set_allmulti(real_dev, -1);
  2335. del_unicast:
  2336. dev_uc_del(real_dev, dev->dev_addr);
  2337. netif_carrier_off(dev);
  2338. return err;
  2339. }
  2340. static int macsec_dev_stop(struct net_device *dev)
  2341. {
  2342. struct macsec_dev *macsec = macsec_priv(dev);
  2343. struct net_device *real_dev = macsec->real_dev;
  2344. netif_carrier_off(dev);
  2345. dev_mc_unsync(real_dev, dev);
  2346. dev_uc_unsync(real_dev, dev);
  2347. if (dev->flags & IFF_ALLMULTI)
  2348. dev_set_allmulti(real_dev, -1);
  2349. if (dev->flags & IFF_PROMISC)
  2350. dev_set_promiscuity(real_dev, -1);
  2351. dev_uc_del(real_dev, dev->dev_addr);
  2352. return 0;
  2353. }
  2354. static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
  2355. {
  2356. struct net_device *real_dev = macsec_priv(dev)->real_dev;
  2357. if (!(dev->flags & IFF_UP))
  2358. return;
  2359. if (change & IFF_ALLMULTI)
  2360. dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
  2361. if (change & IFF_PROMISC)
  2362. dev_set_promiscuity(real_dev,
  2363. dev->flags & IFF_PROMISC ? 1 : -1);
  2364. }
  2365. static void macsec_dev_set_rx_mode(struct net_device *dev)
  2366. {
  2367. struct net_device *real_dev = macsec_priv(dev)->real_dev;
  2368. dev_mc_sync(real_dev, dev);
  2369. dev_uc_sync(real_dev, dev);
  2370. }
  2371. static sci_t dev_to_sci(struct net_device *dev, __be16 port)
  2372. {
  2373. return make_sci(dev->dev_addr, port);
  2374. }
  2375. static int macsec_set_mac_address(struct net_device *dev, void *p)
  2376. {
  2377. struct macsec_dev *macsec = macsec_priv(dev);
  2378. struct net_device *real_dev = macsec->real_dev;
  2379. struct sockaddr *addr = p;
  2380. int err;
  2381. if (!is_valid_ether_addr(addr->sa_data))
  2382. return -EADDRNOTAVAIL;
  2383. if (!(dev->flags & IFF_UP))
  2384. goto out;
  2385. err = dev_uc_add(real_dev, addr->sa_data);
  2386. if (err < 0)
  2387. return err;
  2388. dev_uc_del(real_dev, dev->dev_addr);
  2389. out:
  2390. ether_addr_copy(dev->dev_addr, addr->sa_data);
  2391. macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
  2392. return 0;
  2393. }
  2394. static int macsec_change_mtu(struct net_device *dev, int new_mtu)
  2395. {
  2396. struct macsec_dev *macsec = macsec_priv(dev);
  2397. unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
  2398. if (macsec->real_dev->mtu - extra < new_mtu)
  2399. return -ERANGE;
  2400. dev->mtu = new_mtu;
  2401. return 0;
  2402. }
  2403. static void macsec_get_stats64(struct net_device *dev,
  2404. struct rtnl_link_stats64 *s)
  2405. {
  2406. int cpu;
  2407. if (!dev->tstats)
  2408. return;
  2409. for_each_possible_cpu(cpu) {
  2410. struct pcpu_sw_netstats *stats;
  2411. struct pcpu_sw_netstats tmp;
  2412. int start;
  2413. stats = per_cpu_ptr(dev->tstats, cpu);
  2414. do {
  2415. start = u64_stats_fetch_begin_irq(&stats->syncp);
  2416. tmp.rx_packets = stats->rx_packets;
  2417. tmp.rx_bytes = stats->rx_bytes;
  2418. tmp.tx_packets = stats->tx_packets;
  2419. tmp.tx_bytes = stats->tx_bytes;
  2420. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  2421. s->rx_packets += tmp.rx_packets;
  2422. s->rx_bytes += tmp.rx_bytes;
  2423. s->tx_packets += tmp.tx_packets;
  2424. s->tx_bytes += tmp.tx_bytes;
  2425. }
  2426. s->rx_dropped = dev->stats.rx_dropped;
  2427. s->tx_dropped = dev->stats.tx_dropped;
  2428. }
  2429. static int macsec_get_iflink(const struct net_device *dev)
  2430. {
  2431. return macsec_priv(dev)->real_dev->ifindex;
  2432. }
  2433. static const struct net_device_ops macsec_netdev_ops = {
  2434. .ndo_init = macsec_dev_init,
  2435. .ndo_uninit = macsec_dev_uninit,
  2436. .ndo_open = macsec_dev_open,
  2437. .ndo_stop = macsec_dev_stop,
  2438. .ndo_fix_features = macsec_fix_features,
  2439. .ndo_change_mtu = macsec_change_mtu,
  2440. .ndo_set_rx_mode = macsec_dev_set_rx_mode,
  2441. .ndo_change_rx_flags = macsec_dev_change_rx_flags,
  2442. .ndo_set_mac_address = macsec_set_mac_address,
  2443. .ndo_start_xmit = macsec_start_xmit,
  2444. .ndo_get_stats64 = macsec_get_stats64,
  2445. .ndo_get_iflink = macsec_get_iflink,
  2446. };
  2447. static const struct device_type macsec_type = {
  2448. .name = "macsec",
  2449. };
  2450. static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
  2451. [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
  2452. [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
  2453. [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
  2454. [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
  2455. [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
  2456. [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
  2457. [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
  2458. [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
  2459. [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
  2460. [IFLA_MACSEC_ES] = { .type = NLA_U8 },
  2461. [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
  2462. [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
  2463. [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
  2464. };
  2465. static void macsec_free_netdev(struct net_device *dev)
  2466. {
  2467. struct macsec_dev *macsec = macsec_priv(dev);
  2468. free_percpu(macsec->stats);
  2469. free_percpu(macsec->secy.tx_sc.stats);
  2470. }
  2471. static void macsec_setup(struct net_device *dev)
  2472. {
  2473. ether_setup(dev);
  2474. dev->min_mtu = 0;
  2475. dev->max_mtu = ETH_MAX_MTU;
  2476. dev->priv_flags |= IFF_NO_QUEUE;
  2477. dev->netdev_ops = &macsec_netdev_ops;
  2478. dev->needs_free_netdev = true;
  2479. dev->priv_destructor = macsec_free_netdev;
  2480. SET_NETDEV_DEVTYPE(dev, &macsec_type);
  2481. eth_zero_addr(dev->broadcast);
  2482. }
  2483. static int macsec_changelink_common(struct net_device *dev,
  2484. struct nlattr *data[])
  2485. {
  2486. struct macsec_secy *secy;
  2487. struct macsec_tx_sc *tx_sc;
  2488. secy = &macsec_priv(dev)->secy;
  2489. tx_sc = &secy->tx_sc;
  2490. if (data[IFLA_MACSEC_ENCODING_SA]) {
  2491. struct macsec_tx_sa *tx_sa;
  2492. tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
  2493. tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
  2494. secy->operational = tx_sa && tx_sa->active;
  2495. }
  2496. if (data[IFLA_MACSEC_WINDOW])
  2497. secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
  2498. if (data[IFLA_MACSEC_ENCRYPT])
  2499. tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
  2500. if (data[IFLA_MACSEC_PROTECT])
  2501. secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
  2502. if (data[IFLA_MACSEC_INC_SCI])
  2503. tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
  2504. if (data[IFLA_MACSEC_ES])
  2505. tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
  2506. if (data[IFLA_MACSEC_SCB])
  2507. tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
  2508. if (data[IFLA_MACSEC_REPLAY_PROTECT])
  2509. secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
  2510. if (data[IFLA_MACSEC_VALIDATION])
  2511. secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
  2512. if (data[IFLA_MACSEC_CIPHER_SUITE]) {
  2513. switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
  2514. case MACSEC_CIPHER_ID_GCM_AES_128:
  2515. case MACSEC_DEFAULT_CIPHER_ID:
  2516. secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
  2517. break;
  2518. case MACSEC_CIPHER_ID_GCM_AES_256:
  2519. secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
  2520. break;
  2521. default:
  2522. return -EINVAL;
  2523. }
  2524. }
  2525. return 0;
  2526. }
  2527. static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
  2528. struct nlattr *data[],
  2529. struct netlink_ext_ack *extack)
  2530. {
  2531. if (!data)
  2532. return 0;
  2533. if (data[IFLA_MACSEC_CIPHER_SUITE] ||
  2534. data[IFLA_MACSEC_ICV_LEN] ||
  2535. data[IFLA_MACSEC_SCI] ||
  2536. data[IFLA_MACSEC_PORT])
  2537. return -EINVAL;
  2538. return macsec_changelink_common(dev, data);
  2539. }
  2540. static void macsec_del_dev(struct macsec_dev *macsec)
  2541. {
  2542. int i;
  2543. while (macsec->secy.rx_sc) {
  2544. struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
  2545. rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
  2546. free_rx_sc(rx_sc);
  2547. }
  2548. for (i = 0; i < MACSEC_NUM_AN; i++) {
  2549. struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
  2550. if (sa) {
  2551. RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
  2552. clear_tx_sa(sa);
  2553. }
  2554. }
  2555. }
  2556. static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
  2557. {
  2558. struct macsec_dev *macsec = macsec_priv(dev);
  2559. struct net_device *real_dev = macsec->real_dev;
  2560. unregister_netdevice_queue(dev, head);
  2561. list_del_rcu(&macsec->secys);
  2562. macsec_del_dev(macsec);
  2563. netdev_upper_dev_unlink(real_dev, dev);
  2564. macsec_generation++;
  2565. }
  2566. static void macsec_dellink(struct net_device *dev, struct list_head *head)
  2567. {
  2568. struct macsec_dev *macsec = macsec_priv(dev);
  2569. struct net_device *real_dev = macsec->real_dev;
  2570. struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
  2571. macsec_common_dellink(dev, head);
  2572. if (list_empty(&rxd->secys)) {
  2573. netdev_rx_handler_unregister(real_dev);
  2574. kfree(rxd);
  2575. }
  2576. }
  2577. static int register_macsec_dev(struct net_device *real_dev,
  2578. struct net_device *dev)
  2579. {
  2580. struct macsec_dev *macsec = macsec_priv(dev);
  2581. struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
  2582. if (!rxd) {
  2583. int err;
  2584. rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
  2585. if (!rxd)
  2586. return -ENOMEM;
  2587. INIT_LIST_HEAD(&rxd->secys);
  2588. err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
  2589. rxd);
  2590. if (err < 0) {
  2591. kfree(rxd);
  2592. return err;
  2593. }
  2594. }
  2595. list_add_tail_rcu(&macsec->secys, &rxd->secys);
  2596. return 0;
  2597. }
  2598. static bool sci_exists(struct net_device *dev, sci_t sci)
  2599. {
  2600. struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
  2601. struct macsec_dev *macsec;
  2602. list_for_each_entry(macsec, &rxd->secys, secys) {
  2603. if (macsec->secy.sci == sci)
  2604. return true;
  2605. }
  2606. return false;
  2607. }
  2608. static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
  2609. {
  2610. struct macsec_dev *macsec = macsec_priv(dev);
  2611. struct macsec_secy *secy = &macsec->secy;
  2612. macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
  2613. if (!macsec->stats)
  2614. return -ENOMEM;
  2615. secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
  2616. if (!secy->tx_sc.stats) {
  2617. free_percpu(macsec->stats);
  2618. return -ENOMEM;
  2619. }
  2620. if (sci == MACSEC_UNDEF_SCI)
  2621. sci = dev_to_sci(dev, MACSEC_PORT_ES);
  2622. secy->netdev = dev;
  2623. secy->operational = true;
  2624. secy->key_len = DEFAULT_SAK_LEN;
  2625. secy->icv_len = icv_len;
  2626. secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
  2627. secy->protect_frames = true;
  2628. secy->replay_protect = false;
  2629. secy->sci = sci;
  2630. secy->tx_sc.active = true;
  2631. secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
  2632. secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
  2633. secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
  2634. secy->tx_sc.end_station = false;
  2635. secy->tx_sc.scb = false;
  2636. return 0;
  2637. }
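
/* Create a MACsec device on top of the lower device given by IFLA_LINK,
 * registering the rx_handler on first use and rejecting duplicate SCIs.
 */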
  2638. static int macsec_newlink(struct net *net, struct net_device *dev,
  2639. struct nlattr *tb[], struct nlattr *data[],
  2640. struct netlink_ext_ack *extack)
  2641. {
  2642. struct macsec_dev *macsec = macsec_priv(dev);
  2643. rx_handler_func_t *rx_handler;
  2644. u8 icv_len = DEFAULT_ICV_LEN;
  2645. struct net_device *real_dev;
  2646. int err, mtu;
  2647. sci_t sci;
  2648. if (!tb[IFLA_LINK])
  2649. return -EINVAL;
  2650. real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
  2651. if (!real_dev)
  2652. return -ENODEV;
  2653. if (real_dev->type != ARPHRD_ETHER)
  2654. return -EINVAL;
  2655. dev->priv_flags |= IFF_MACSEC;
  2656. macsec->real_dev = real_dev;
  2657. if (data && data[IFLA_MACSEC_ICV_LEN])
  2658. icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
  2659. mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
  2660. if (mtu < 0)
  2661. dev->mtu = 0;
  2662. else
  2663. dev->mtu = mtu;
  2664. rx_handler = rtnl_dereference(real_dev->rx_handler);
  2665. if (rx_handler && rx_handler != macsec_handle_frame)
  2666. return -EBUSY;
  2667. err = register_netdevice(dev);
  2668. if (err < 0)
  2669. return err;
  2670. err = netdev_upper_dev_link(real_dev, dev, extack);
  2671. if (err < 0)
  2672. goto unregister;
  2673. /* need to be already registered so that ->init has run and
  2674. * the MAC addr is set
  2675. */
  2676. if (data && data[IFLA_MACSEC_SCI])
  2677. sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
  2678. else if (data && data[IFLA_MACSEC_PORT])
  2679. sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
  2680. else
  2681. sci = dev_to_sci(dev, MACSEC_PORT_ES);
  2682. if (rx_handler && sci_exists(real_dev, sci)) {
  2683. err = -EBUSY;
  2684. goto unlink;
  2685. }
  2686. err = macsec_add_dev(dev, sci, icv_len);
  2687. if (err)
  2688. goto unlink;
  2689. if (data) {
  2690. err = macsec_changelink_common(dev, data);
  2691. if (err)
  2692. goto del_dev;
  2693. }
  2694. err = register_macsec_dev(real_dev, dev);
  2695. if (err < 0)
  2696. goto del_dev;
  2697. netif_stacked_transfer_operstate(real_dev, dev);
  2698. linkwatch_fire_event(dev);
  2699. macsec_generation++;
  2700. return 0;
  2701. del_dev:
  2702. macsec_del_dev(macsec);
  2703. unlink:
  2704. netdev_upper_dev_unlink(real_dev, dev);
  2705. unregister:
  2706. unregister_netdevice(dev);
  2707. return err;
  2708. }
  2709. static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
  2710. struct netlink_ext_ack *extack)
  2711. {
  2712. u64 csid = MACSEC_DEFAULT_CIPHER_ID;
  2713. u8 icv_len = DEFAULT_ICV_LEN;
  2714. int flag;
  2715. bool es, scb, sci;
  2716. if (!data)
  2717. return 0;
  2718. if (data[IFLA_MACSEC_CIPHER_SUITE])
  2719. csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
  2720. if (data[IFLA_MACSEC_ICV_LEN]) {
  2721. icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
  2722. if (icv_len != DEFAULT_ICV_LEN) {
  2723. char dummy_key[DEFAULT_SAK_LEN] = { 0 };
  2724. struct crypto_aead *dummy_tfm;
  2725. dummy_tfm = macsec_alloc_tfm(dummy_key,
  2726. DEFAULT_SAK_LEN,
  2727. icv_len);
  2728. if (IS_ERR(dummy_tfm))
  2729. return PTR_ERR(dummy_tfm);
  2730. crypto_free_aead(dummy_tfm);
  2731. }
  2732. }
  2733. switch (csid) {
  2734. case MACSEC_CIPHER_ID_GCM_AES_128:
  2735. case MACSEC_CIPHER_ID_GCM_AES_256:
  2736. case MACSEC_DEFAULT_CIPHER_ID:
  2737. if (icv_len < MACSEC_MIN_ICV_LEN ||
  2738. icv_len > MACSEC_STD_ICV_LEN)
  2739. return -EINVAL;
  2740. break;
  2741. default:
  2742. return -EINVAL;
  2743. }
  2744. if (data[IFLA_MACSEC_ENCODING_SA]) {
  2745. if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
  2746. return -EINVAL;
  2747. }
  2748. for (flag = IFLA_MACSEC_ENCODING_SA + 1;
  2749. flag < IFLA_MACSEC_VALIDATION;
  2750. flag++) {
  2751. if (data[flag]) {
  2752. if (nla_get_u8(data[flag]) > 1)
  2753. return -EINVAL;
  2754. }
  2755. }
  2756. es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
  2757. sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
  2758. scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
  2759. if ((sci && (scb || es)) || (scb && es))
  2760. return -EINVAL;
  2761. if (data[IFLA_MACSEC_VALIDATION] &&
  2762. nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
  2763. return -EINVAL;
  2764. if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
  2765. nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
  2766. !data[IFLA_MACSEC_WINDOW])
  2767. return -EINVAL;
  2768. return 0;
  2769. }
  2770. static struct net *macsec_get_link_net(const struct net_device *dev)
  2771. {
  2772. return dev_net(macsec_priv(dev)->real_dev);
  2773. }
  2774. static size_t macsec_get_size(const struct net_device *dev)
  2775. {
  2776. return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
  2777. nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
  2778. nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
  2779. nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
  2780. nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
  2781. nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
  2782. nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
  2783. nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
  2784. nla_total_size(1) + /* IFLA_MACSEC_ES */
  2785. nla_total_size(1) + /* IFLA_MACSEC_SCB */
  2786. nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
  2787. nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
  2788. 0;
  2789. }
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
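
/* rtnl_link_ops backing "ip link add ... type macsec". */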
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};
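
/* True when macsec_handle_frame() is installed as the device's rx_handler,
 * i.e. at least one MACsec interface is stacked on top of it.
 */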
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
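
/* Netdevice notifier for the underlying ("real") device: propagate operstate
 * changes to the stacked MACsec devices, tear them down when the real device
 * unregisters, and shrink their MTU when the real device's MTU drops so the
 * SecTAG and ICV still fit.
 */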
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
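
/* Module init: register the netdevice notifier, the rtnl link ops and the
 * generic netlink family, unwinding in reverse order on failure.
 */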
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}
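
/* Module exit: unregister in reverse order of macsec_init(), then wait with
 * rcu_barrier() for any RCU callbacks queued by this driver to finish before
 * its code is unloaded.
 */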
static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");