/* net/core/flow_dissector.c - generic packet flow dissector */
  1. #include <linux/kernel.h>
  2. #include <linux/skbuff.h>
  3. #include <linux/export.h>
  4. #include <linux/ip.h>
  5. #include <linux/ipv6.h>
  6. #include <linux/if_vlan.h>
  7. #include <net/ip.h>
  8. #include <net/ipv6.h>
  9. #include <linux/igmp.h>
  10. #include <linux/icmp.h>
  11. #include <linux/sctp.h>
  12. #include <linux/dccp.h>
  13. #include <linux/if_tunnel.h>
  14. #include <linux/if_pppox.h>
  15. #include <linux/ppp_defs.h>
  16. #include <linux/stddef.h>
  17. #include <linux/if_ether.h>
  18. #include <linux/mpls.h>
  19. #include <net/flow_dissector.h>
  20. #include <scsi/fc/fc_fcoe.h>
  21. static bool skb_flow_dissector_uses_key(struct flow_dissector *flow_dissector,
  22. enum flow_dissector_key_id key_id)
  23. {
  24. return flow_dissector->used_keys & (1 << key_id);
  25. }
  26. static void skb_flow_dissector_set_key(struct flow_dissector *flow_dissector,
  27. enum flow_dissector_key_id key_id)
  28. {
  29. flow_dissector->used_keys |= (1 << key_id);
  30. }
  31. static void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
  32. enum flow_dissector_key_id key_id,
  33. void *target_container)
  34. {
  35. return ((char *) target_container) + flow_dissector->offset[key_id];
  36. }
/**
 * skb_flow_dissector_init - register the keys a dissector will extract
 * @flow_dissector: dissector to initialize
 * @key: array of (key_id, target offset) descriptors
 * @key_count: number of entries in @key
 *
 * Records, for each requested key id, the byte offset in the target
 * container where the dissected value will be stored.  Duplicate key
 * ids and oversized offsets are programming errors (BUG).  The CONTROL
 * and BASIC keys are mandatory; the fast path relies on them.
 */
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is withing
		 * boundaries of unsigned short (offsets are stored as u16 and
		 * would silently truncate otherwise).
		 */
		BUG_ON(key->offset > USHRT_MAX);
		/* Each key id may be registered at most once. */
		BUG_ON(skb_flow_dissector_uses_key(flow_dissector,
						   key->key_id));

		skb_flow_dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
					    FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
					    FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
  62. /**
  63. * __skb_flow_get_ports - extract the upper layer ports and return them
  64. * @skb: sk_buff to extract the ports from
  65. * @thoff: transport header offset
  66. * @ip_proto: protocol for which to get port offset
  67. * @data: raw buffer pointer to the packet, if NULL use skb->data
  68. * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
  69. *
  70. * The function will try to retrieve the ports at offset thoff + poff where poff
  71. * is the protocol port offset returned from proto_ports_offset
  72. */
  73. __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
  74. void *data, int hlen)
  75. {
  76. int poff = proto_ports_offset(ip_proto);
  77. if (!data) {
  78. data = skb->data;
  79. hlen = skb_headlen(skb);
  80. }
  81. if (poff >= 0) {
  82. __be32 *ports, _ports;
  83. ports = __skb_header_pointer(skb, thoff + poff,
  84. sizeof(_ports), data, hlen, &_ports);
  85. if (ports)
  86. return *ports;
  87. }
  88. return 0;
  89. }
  90. EXPORT_SYMBOL(__skb_flow_get_ports);
/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve individual keys into target specified
 * by flow_dissector from either the skbuff or a raw buffer specified by the
 * rest parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_keyid *key_keyid;
	u8 ip_proto = 0;

	if (!data) {
		data = skb->data;
		proto = skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
	}

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (skb_flow_dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		/* NOTE(review): eth_hdr(skb) dereferences the skb, so a
		 * real skb (not a raw data buffer) is assumed whenever the
		 * ETH_ADDRS key is requested -- confirm against callers.
		 */
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		/* h_dest and h_source are adjacent; one memcpy grabs both. */
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		/* ihl < 5 means a malformed IPv4 header (min 20 bytes). */
		if (!iph || iph->ihl < 5)
			return false;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;
		/* Non-first fragments carry no L4 header; zero ip_proto so
		 * no ports are read from the fragment payload.
		 */
		if (ip_is_fragment(iph))
			ip_proto = 0;

		if (!skb_flow_dissector_uses_key(flow_dissector,
						 FLOW_DISSECTOR_KEY_IPV4_ADDRS))
			break;

		key_addrs = skb_flow_dissector_target(flow_dissector,
			      FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container);
		/* saddr and daddr are adjacent in the IP header. */
		memcpy(&key_addrs->v4addrs, &iph->saddr,
		       sizeof(key_addrs->v4addrs));
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
		__be32 flow_label;
ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (skb_flow_dissector_uses_key(flow_dissector,
						FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;

			key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
								   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
								   target_container);
			/* saddr and daddr are adjacent in the IPv6 header. */
			memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		flow_label = ip6_flowlabel(iph);
		if (flow_label) {
			if (skb_flow_dissector_uses_key(flow_dissector,
							FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
		}

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
		if (!vlan)
			return false;

		if (skb_flow_dissector_uses_key(flow_dissector,
						FLOW_DISSECTOR_KEY_VLANID)) {
			key_tags = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_VLANID,
							     target_container);

			/* NOTE(review): reads the id from the skb's vlan tag,
			 * not from the parsed header, and dereferences skb --
			 * assumes a real skb when the VLANID key is requested.
			 */
			key_tags->vlan_id = skb_vlan_tag_get_id(skb);
		}

		/* Restart dissection at the encapsulated protocol. */
		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		/* Only IP and IPv6 PPP payloads are dissected. */
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	case htons(ETH_P_TIPC): {
		struct {
			__be32 pre[3];
			__be32 srcnode;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		/* TIPC has no L4 to parse: record what we have and stop. */
		key_basic->n_proto = proto;
		key_control->thoff = (u16)nhoff;

		if (skb_flow_dissector_uses_key(flow_dissector,
						FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
							      target_container);
			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
		}
		return true;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC): {
		/* Read two label stack entries: the entropy label (if any)
		 * follows the MPLS_LABEL_ENTROPY marker label.
		 */
		struct mpls_label *hdr, _hdr[2];
mpls:
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
					   hlen, &_hdr);
		if (!hdr)
			return false;

		if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
		     MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
			if (skb_flow_dissector_uses_key(flow_dissector,
							FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
				key_keyid = skb_flow_dissector_target(flow_dissector,
								      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
								      target_container);
				key_keyid->keyid = hdr[1].entry &
					htonl(MPLS_LS_LABEL_MASK);
			}

			key_basic->n_proto = proto;
			key_basic->ip_proto = ip_proto;
			key_control->thoff = (u16)nhoff;

			return true;
		}

		/* No entropy label: nothing further to dissect in MPLS. */
		return true;
	}

	case htons(ETH_P_FCOE):
		/* FCoE: record the post-header offset, then fail the
		 * dissection like any other unhandled protocol.
		 */
		key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
		/* fall through */
	default:
		return false;
	}

ip_proto_again:
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (hdr->flags & (GRE_VERSION | GRE_ROUTING))
			break;

		proto = hdr->proto;
		nhoff += 4;
		/* Each optional GRE field (csum, key, seq) is 4 bytes. */
		if (hdr->flags & GRE_CSUM)
			nhoff += 4;
		if (hdr->flags & GRE_KEY) {
			const __be32 *keyid;
			__be32 _keyid;

			keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid),
						     data, hlen, &_keyid);

			if (!keyid)
				return false;

			if (skb_flow_dissector_uses_key(flow_dissector,
							FLOW_DISSECTOR_KEY_GRE_KEYID)) {
				key_keyid = skb_flow_dissector_target(flow_dissector,
								      FLOW_DISSECTOR_KEY_GRE_KEYID,
								      target_container);
				key_keyid->keyid = *keyid;
			}
			nhoff += 4;
		}
		if (hdr->flags & GRE_SEQ)
			nhoff += 4;
		if (proto == htons(ETH_P_TEB)) {
			/* Transparent Ethernet Bridging: skip the inner
			 * ethernet header and dissect its payload.
			 */
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, nhoff,
						   sizeof(_eth),
						   data, hlen, &_eth);
			if (!eth)
				return false;
			proto = eth->h_proto;
			nhoff += sizeof(*eth);
		}
		goto again;
	}
	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		/* IPv6 extension headers: byte 0 is the next header,
		 * byte 1 is the length in 8-byte units minus one.
		 */
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr)
			return false;

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		goto ip_proto_again;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		goto mpls;
	default:
		break;
	}

	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;
	key_control->thoff = (u16)nhoff;

	if (skb_flow_dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_PORTS)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

	return true;
}
EXPORT_SYMBOL(__skb_flow_dissect);
/* Random seed for flow hashing; initialized once on first use so that
 * hash values are unpredictable across boots.
 */
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}
/* Hash @length 32-bit words at @words with seed @keyval. */
static __always_inline u32 __flow_hash_words(u32 *words, u32 length, u32 keyval)
{
	return jhash2(words, length, keyval);
}
/* Start of the hashed portion of struct flow_keys; must be u32-aligned
 * because the bytes are fed to jhash2() as 32-bit words.
 */
static inline void *flow_keys_hash_start(struct flow_keys *flow)
{
	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (void *)flow + FLOW_KEYS_HASH_OFFSET;
}
/* Number of 32-bit words (despite the size_t return type, this is a
 * word count consumed by jhash2(), not a byte count) to hash starting
 * at flow_keys_hash_start().  Only the address-union variant actually
 * in use is included; unused trailing bytes of the larger variants are
 * excluded so identical flows hash identically.
 */
static inline size_t flow_keys_hash_length(struct flow_keys *flow)
{
	/* Start by assuming none of the addrs union is hashed ... */
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	/* The layout relies on addrs being the last member of flow_keys. */
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	/* ... then add back just the variant selected by addr_type. */
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		diff -= sizeof(flow->addrs.tipcaddrs);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}
  412. __be32 flow_get_u32_src(const struct flow_keys *flow)
  413. {
  414. switch (flow->control.addr_type) {
  415. case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
  416. return flow->addrs.v4addrs.src;
  417. case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
  418. return (__force __be32)ipv6_addr_hash(
  419. &flow->addrs.v6addrs.src);
  420. case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
  421. return flow->addrs.tipcaddrs.srcnode;
  422. default:
  423. return 0;
  424. }
  425. }
  426. EXPORT_SYMBOL(flow_get_u32_src);
  427. __be32 flow_get_u32_dst(const struct flow_keys *flow)
  428. {
  429. switch (flow->control.addr_type) {
  430. case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
  431. return flow->addrs.v4addrs.dst;
  432. case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
  433. return (__force __be32)ipv6_addr_hash(
  434. &flow->addrs.v6addrs.dst);
  435. default:
  436. return 0;
  437. }
  438. }
  439. EXPORT_SYMBOL(flow_get_u32_dst);
/* Put the (addr, port) pairs in a canonical order so that both
 * directions of the same connection produce the same hash.  The pair is
 * swapped when dst sorts below src (ports break address ties).
 */
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		/* NOTE(review): the u32 difference is truncated into a
		 * signed int, so this is a consistent-but-arbitrary
		 * ordering, not a numeric comparison of the addresses.
		 */
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		/* memcmp() gives a total order over the 128-bit addresses. */
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}
  471. static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
  472. {
  473. u32 hash;
  474. __flow_hash_consistentify(keys);
  475. hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys),
  476. flow_keys_hash_length(keys), keyval);
  477. if (!hash)
  478. hash = 1;
  479. return hash;
  480. }
/* Hash a flow_keys structure with the boot-time random seed. */
u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
  487. static inline u32 ___skb_get_hash(const struct sk_buff *skb,
  488. struct flow_keys *keys, u32 keyval)
  489. {
  490. if (!skb_flow_dissect_flow_keys(skb, keys))
  491. return 0;
  492. return __flow_hash_from_keys(keys, keyval);
  493. }
/* Layout of the data packed into a struct flow_keys_digest; must fit in
 * sizeof(struct flow_keys_digest) (checked in make_flow_keys_digest()).
 */
struct _flow_keys_digest_data {
	__be16	n_proto;	/* L3 protocol (EtherType) */
	u8	ip_proto;	/* L4 protocol */
	u8	padding;
	__be32	ports;		/* src/dst port pair */
	__be32	src;		/* IPv4 source (or first addr word) */
	__be32	dst;		/* IPv4 destination (or first addr word) */
};
  502. void make_flow_keys_digest(struct flow_keys_digest *digest,
  503. const struct flow_keys *flow)
  504. {
  505. struct _flow_keys_digest_data *data =
  506. (struct _flow_keys_digest_data *)digest;
  507. BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
  508. memset(digest, 0, sizeof(*digest));
  509. data->n_proto = flow->basic.n_proto;
  510. data->ip_proto = flow->basic.ip_proto;
  511. data->ports = flow->ports.ports;
  512. data->src = flow->addrs.v4addrs.src;
  513. data->dst = flow->addrs.v4addrs.dst;
  514. }
  515. EXPORT_SYMBOL(make_flow_keys_digest);
  516. /**
  517. * __skb_get_hash: calculate a flow hash
  518. * @skb: sk_buff to calculate flow hash from
  519. *
  520. * This function calculates a flow hash based on src/dst addresses
  521. * and src/dst port numbers. Sets hash in skb to non-zero hash value
  522. * on success, zero indicates no valid hash. Also, sets l4_hash in skb
  523. * if hash is a canonical 4-tuple hash over transport ports.
  524. */
  525. void __skb_get_hash(struct sk_buff *skb)
  526. {
  527. struct flow_keys keys;
  528. u32 hash;
  529. __flow_hash_secret_init();
  530. hash = ___skb_get_hash(skb, &keys, hashrnd);
  531. if (!hash)
  532. return;
  533. if (keys.ports.ports)
  534. skb->l4_hash = 1;
  535. skb->sw_hash = 1;
  536. skb->hash = hash;
  537. }
  538. EXPORT_SYMBOL(__skb_get_hash);
/* Flow hash with a caller-supplied seed instead of the global hashrnd;
 * the skb's cached hash fields are not touched.
 */
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);
/* Return the offset of the payload that follows the L4 header described
 * by @keys, starting from the transport header offset.  For protocols
 * with a fixed header, the fixed size is added; unknown protocols fall
 * through and return the transport offset unchanged.
 */
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		/* doff is the upper nibble of byte 12, in 4-byte words:
		 * (*doff & 0xF0) >> 2 == ((doff >> 4) * 4) bytes.  Clamp
		 * to the minimum TCP header size for malformed values.
		 */
		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
  586. /**
  587. * skb_get_poff - get the offset to the payload
  588. * @skb: sk_buff to get the payload offset from
  589. *
  590. * The function will get the offset to the payload as far as it could
  591. * be dissected. The main user is currently BPF, so that we can dynamically
  592. * truncate packets without needing to push actual payload to the user
  593. * space and can analyze headers only, instead.
  594. */
  595. u32 skb_get_poff(const struct sk_buff *skb)
  596. {
  597. struct flow_keys keys;
  598. if (!skb_flow_dissect_flow_keys(skb, &keys))
  599. return 0;
  600. return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
  601. }
/* Key set for the default skb dissector: maps every supported key id to
 * its offset inside struct flow_keys.  VLANID and FLOW_LABEL share the
 * tags member -- at most one of them is filled for a given packet.
 */
static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLANID,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};
/* Minimal key set for dissecting raw buffers (no skb): only the two
 * mandatory keys, avoiding keys that dereference the skb (e.g. VLANID).
 */
static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};
/* Default dissectors: one for full skbs, one for raw data buffers. */
struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_buf_dissector __read_mostly;

/* Register both default dissector key sets at late init, before any
 * networking fast path can use them.
 */
static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_buf_dissector,
				flow_keys_buf_dissector_keys,
				ARRAY_SIZE(flow_keys_buf_dissector_keys));
	return 0;
}

late_initcall_sync(init_default_flow_dissectors);