/* nft_payload.c */
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
/* add vlan header into the user buffer for if tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	/* Copy @len bytes of the link-layer header, starting at @offset, into
	 * @d — reinserting the VLAN tag that hardware offload stripped into
	 * skb->vlan_tci/vlan_proto.  Returns false if the skb is too short.
	 */
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;	/* on-stack reconstruction of the tagged header */

	vlanh = (u8 *) &veth;
	if (offset < ETH_HLEN) {
		/* Request starts inside the Ethernet addresses/type area. */
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		/* Pull the real src/dst MACs (and on-wire type) from the skb. */
		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			return false;

		/* The tag was offloaded, so the on-skb ethertype is the inner
		 * protocol; the outer proto the user expects is vlan_proto.
		 */
		veth.h_vlan_proto = skb->vlan_proto;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN;	/* continue from the start of the tag */
	} else if (offset >= VLAN_ETH_HLEN) {
		/* Request lies entirely past the (virtual) tag: compensate for
		 * the VLAN_HLEN bytes that are absent from the skb data.
		 */
		offset -= VLAN_HLEN;
		goto skip;
	}

	/* Fill in the offloaded tag: TCI from the skb metadata, inner proto
	 * from skb->protocol.
	 */
	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	vlanh += offset;

	/* Copy whatever of the request falls within the rebuilt header. */
	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
	memcpy(dst_u8, vlanh, vlan_len);

	len -= vlan_len;
	if (!len)
		return true;

	dst_u8 += vlan_len;
	/* offset == VLAN_ETH_HLEN here, minus VLAN_HLEN gives the skb offset
	 * just past the Ethernet header — the remainder comes from the skb.
	 */
	offset -= VLAN_HLEN;
 skip:
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
  54. static void nft_payload_eval(const struct nft_expr *expr,
  55. struct nft_regs *regs,
  56. const struct nft_pktinfo *pkt)
  57. {
  58. const struct nft_payload *priv = nft_expr_priv(expr);
  59. const struct sk_buff *skb = pkt->skb;
  60. u32 *dest = &regs->data[priv->dreg];
  61. int offset;
  62. dest[priv->len / NFT_REG32_SIZE] = 0;
  63. switch (priv->base) {
  64. case NFT_PAYLOAD_LL_HEADER:
  65. if (!skb_mac_header_was_set(skb))
  66. goto err;
  67. if (skb_vlan_tag_present(skb)) {
  68. if (!nft_payload_copy_vlan(dest, skb,
  69. priv->offset, priv->len))
  70. goto err;
  71. return;
  72. }
  73. offset = skb_mac_header(skb) - skb->data;
  74. break;
  75. case NFT_PAYLOAD_NETWORK_HEADER:
  76. offset = skb_network_offset(skb);
  77. break;
  78. case NFT_PAYLOAD_TRANSPORT_HEADER:
  79. if (!pkt->tprot_set)
  80. goto err;
  81. offset = pkt->xt.thoff;
  82. break;
  83. default:
  84. BUG();
  85. }
  86. offset += priv->offset;
  87. if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
  88. goto err;
  89. return;
  90. err:
  91. regs->verdict.code = NFT_BREAK;
  92. }
/* Netlink attribute policy shared by the payload get/set expressions:
 * all attributes are 32-bit big-endian values on the wire.
 */
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
};
  102. static int nft_payload_init(const struct nft_ctx *ctx,
  103. const struct nft_expr *expr,
  104. const struct nlattr * const tb[])
  105. {
  106. struct nft_payload *priv = nft_expr_priv(expr);
  107. priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
  108. priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
  109. priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
  110. priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
  111. return nft_validate_register_store(ctx, priv->dreg, NULL,
  112. NFT_DATA_VALUE, priv->len);
  113. }
  114. static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
  115. {
  116. const struct nft_payload *priv = nft_expr_priv(expr);
  117. if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
  118. nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
  119. nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
  120. nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
  121. goto nla_put_failure;
  122. return 0;
  123. nla_put_failure:
  124. return -1;
  125. }
/* Forward declaration: the ops tables below point back at the type. */
static struct nft_expr_type nft_payload_type;

/* Generic (slow-path) payload load ops. */
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};
/* Fast-path ops: same callbacks as nft_payload_ops, but the distinct,
 * non-static object identity matters.
 * NOTE(review): presumably the core evaluation loop compares an
 * expression's ops pointer against &nft_payload_fast_ops to take an
 * inlined fast path, with nft_payload_eval here serving only as the
 * fallback — confirm against nf_tables_core.
 */
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
};
/* Evaluate a payload store: write priv->len bytes from the source register
 * into the packet at the selected base + priv->offset, incrementally fixing
 * the Internet checksum at base + priv->csum_offset when requested.
 * Sets NFT_BREAK on any failure.
 */
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;
	__sum16 sum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	/* csum_offset is relative to the base, not to priv->offset; compute
	 * it before offset is advanced below.
	 */
	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	/* Fix the checksum unless it is a transport-header checksum that is
	 * still CHECKSUM_PARTIAL — NOTE(review): presumably because hardware
	 * will finalize it later; confirm.
	 */
	if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
			goto err;

		/* Incremental update (RFC 1624 style): remove the checksum of
		 * the bytes being replaced (fsum), add the checksum of the
		 * replacement bytes (tsum).
		 */
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);
		sum = csum_fold(csum_add(csum_sub(~csum_unfold(sum), fsum),
					 tsum));
		/* Avoid emitting 0, which is ambiguous on the wire
		 * (e.g. "no checksum" for UDP).
		 */
		if (sum == 0)
			sum = CSUM_MANGLED_0;

		if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
		    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
			goto err;
	}

	if (!skb_make_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}
  192. static int nft_payload_set_init(const struct nft_ctx *ctx,
  193. const struct nft_expr *expr,
  194. const struct nlattr * const tb[])
  195. {
  196. struct nft_payload_set *priv = nft_expr_priv(expr);
  197. priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
  198. priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
  199. priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
  200. priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
  201. if (tb[NFTA_PAYLOAD_CSUM_TYPE])
  202. priv->csum_type =
  203. ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
  204. if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
  205. priv->csum_offset =
  206. ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
  207. switch (priv->csum_type) {
  208. case NFT_PAYLOAD_CSUM_NONE:
  209. case NFT_PAYLOAD_CSUM_INET:
  210. break;
  211. default:
  212. return -EOPNOTSUPP;
  213. }
  214. return nft_validate_register_load(priv->sreg, priv->len);
  215. }
  216. static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
  217. {
  218. const struct nft_payload_set *priv = nft_expr_priv(expr);
  219. if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
  220. nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
  221. nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
  222. nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
  223. nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
  224. nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
  225. htonl(priv->csum_offset)))
  226. goto nla_put_failure;
  227. return 0;
  228. nla_put_failure:
  229. return -1;
  230. }
/* Ops for the register->payload store variant of the expression. */
static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};
  238. static const struct nft_expr_ops *
  239. nft_payload_select_ops(const struct nft_ctx *ctx,
  240. const struct nlattr * const tb[])
  241. {
  242. enum nft_payload_bases base;
  243. unsigned int offset, len;
  244. if (tb[NFTA_PAYLOAD_BASE] == NULL ||
  245. tb[NFTA_PAYLOAD_OFFSET] == NULL ||
  246. tb[NFTA_PAYLOAD_LEN] == NULL)
  247. return ERR_PTR(-EINVAL);
  248. base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
  249. switch (base) {
  250. case NFT_PAYLOAD_LL_HEADER:
  251. case NFT_PAYLOAD_NETWORK_HEADER:
  252. case NFT_PAYLOAD_TRANSPORT_HEADER:
  253. break;
  254. default:
  255. return ERR_PTR(-EOPNOTSUPP);
  256. }
  257. if (tb[NFTA_PAYLOAD_SREG] != NULL) {
  258. if (tb[NFTA_PAYLOAD_DREG] != NULL)
  259. return ERR_PTR(-EINVAL);
  260. return &nft_payload_set_ops;
  261. }
  262. if (tb[NFTA_PAYLOAD_DREG] == NULL)
  263. return ERR_PTR(-EINVAL);
  264. offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
  265. len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
  266. if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
  267. base != NFT_PAYLOAD_LL_HEADER)
  268. return &nft_payload_fast_ops;
  269. else
  270. return &nft_payload_ops;
  271. }
/* The "payload" expression type registered with nf_tables. */
static struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};
/* Register the payload expression type; returns 0 or a negative errno. */
int __init nft_payload_module_init(void)
{
	return nft_register_expr(&nft_payload_type);
}
/* Unregister the payload expression type on module teardown. */
void nft_payload_module_exit(void)
{
	nft_unregister_expr(&nft_payload_type);
}