TOMOYO Linux Cross Reference
Linux/net/netfilter/nft_payload.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
                                         struct vlan_ethhdr *veth)
{
        if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
                return false;

        veth->h_vlan_proto = skb->vlan_proto;
        veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
        veth->h_vlan_encapsulated_proto = skb->protocol;

        return true;
}

/* add vlan header into the user buffer if the tag was removed by offloads */
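/*
 * When VLAN offload has stripped the outer tag, the tag only exists in
 * skb->vlan_tci/skb->vlan_proto, so a link-layer read that expects the tag
 * inline is stitched together from two sources: the first VLAN_ETH_HLEN (18)
 * bytes come from the vlan_ethhdr rebuilt on the stack, the remainder from
 * the skb data area.  For example, a 4-byte read at link-layer offset 16
 * returns the two bytes of the rebuilt encapsulated protocol field followed
 * by the first two bytes that follow the Ethernet header in the skb.
 */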
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
        int mac_off = skb_mac_header(skb) - skb->data;
        u8 *vlanh, *dst_u8 = (u8 *) d;
        struct vlan_ethhdr veth;
        u8 vlan_hlen = 0;

        if ((skb->protocol == htons(ETH_P_8021AD) ||
             skb->protocol == htons(ETH_P_8021Q)) &&
            offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
                vlan_hlen += VLAN_HLEN;

        vlanh = (u8 *) &veth;
        if (offset < VLAN_ETH_HLEN + vlan_hlen) {
                u8 ethlen = len;

                if (vlan_hlen &&
                    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
                        return false;
                else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
                        return false;

                if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
                        ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

                memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

                len -= ethlen;
                if (len == 0)
                        return true;

                dst_u8 += ethlen;
                offset = ETH_HLEN + vlan_hlen;
        } else {
                offset -= VLAN_HLEN + vlan_hlen;
        }

        return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

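/*
 * Evaluation of a payload load: copy priv->len bytes, starting at
 * priv->offset relative to the configured base header, into the destination
 * register.  Registers are 32-bit words, so when the length is not a
 * multiple of NFT_REG32_SIZE the last word is cleared first to avoid leaving
 * stale bits behind the copied data.  For example, the nft match
 * "ip saddr 192.0.2.1" is evaluated as a 4-byte load at offset 12 of the
 * network header.  A load that cannot be satisfied (truncated packet, no
 * transport header) ends evaluation of the rule with NFT_BREAK.
 */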
void nft_payload_eval(const struct nft_expr *expr,
                      struct nft_regs *regs,
                      const struct nft_pktinfo *pkt)
{
        const struct nft_payload *priv = nft_expr_priv(expr);
        const struct sk_buff *skb = pkt->skb;
        u32 *dest = &regs->data[priv->dreg];
        int offset;

        if (priv->len % NFT_REG32_SIZE)
                dest[priv->len / NFT_REG32_SIZE] = 0;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;

                if (skb_vlan_tag_present(skb)) {
                        if (!nft_payload_copy_vlan(dest, skb,
                                                   priv->offset, priv->len))
                                goto err;
                        return;
                }
                offset = skb_mac_header(skb) - skb->data;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                offset = skb_network_offset(skb);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                if (!pkt->tprot_set)
                        goto err;
                offset = pkt->xt.thoff;
                break;
        default:
                BUG();
        }
        offset += priv->offset;

        if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
                goto err;
        return;
err:
        regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
        [NFTA_PAYLOAD_SREG]             = { .type = NLA_U32 },
        [NFTA_PAYLOAD_DREG]             = { .type = NLA_U32 },
        [NFTA_PAYLOAD_BASE]             = { .type = NLA_U32 },
        [NFTA_PAYLOAD_OFFSET]           = { .type = NLA_U32 },
        [NFTA_PAYLOAD_LEN]              = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_TYPE]        = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_OFFSET]      = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_FLAGS]       = { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
                            const struct nft_expr *expr,
                            const struct nlattr * const tb[])
{
        struct nft_payload *priv = nft_expr_priv(expr);

        priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
        priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

        return nft_validate_register_store(ctx, priv->dreg, NULL,
                                           NFT_DATA_VALUE, priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
        const struct nft_payload *priv = nft_expr_priv(expr);

        if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
            nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

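/*
 * Hardware offload: translate a payload load into a flow dissector match
 * key.  Only a fixed set of well-known header fields is supported; anything
 * else returns -EOPNOTSUPP and the expression cannot be offloaded.  For the
 * link layer the recognised offsets are the Ethernet destination, source
 * and protocol fields (offsets 0, 6 and 12), the outer VLAN TCI and
 * encapsulated protocol (14 and 16), and the same two fields of an inner
 * tag one vlan_hdr (4 bytes) further in.
 */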
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct ethhdr, h_source):
                if (priv->len != ETH_ALEN)
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  src, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_dest):
                if (priv->len != ETH_ALEN)
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  dst, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_proto):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
                                  n_proto, sizeof(__be16), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_TCI):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
                                  vlan_tci, sizeof(__be16), reg);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
                                  vlan_tpid, sizeof(__be16), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
                                  vlan_tci, sizeof(__be16), reg);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
                                                        sizeof(struct vlan_hdr):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
                                  vlan_tpid, sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct iphdr, saddr):
                if (priv->len != sizeof(struct in_addr))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
                                  sizeof(struct in_addr), reg);
                break;
        case offsetof(struct iphdr, daddr):
                if (priv->len != sizeof(struct in_addr))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                                  sizeof(struct in_addr), reg);
                break;
        case offsetof(struct iphdr, protocol):
                if (priv->len != sizeof(__u8))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct ipv6hdr, saddr):
                if (priv->len != sizeof(struct in6_addr))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
                                  sizeof(struct in6_addr), reg);
                break;
        case offsetof(struct ipv6hdr, daddr):
                if (priv->len != sizeof(struct in6_addr))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
                                  sizeof(struct in6_addr), reg);
                break;
        case offsetof(struct ipv6hdr, nexthdr):
                if (priv->len != sizeof(__u8))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        int err;

        switch (ctx->dep.l3num) {
        case htons(ETH_P_IP):
                err = nft_payload_offload_ip(ctx, flow, priv);
                break;
        case htons(ETH_P_IPV6):
                err = nft_payload_offload_ip6(ctx, flow, priv);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct tcphdr, source):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct tcphdr, dest):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct udphdr, source):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct udphdr, dest):
                if (priv->len != sizeof(__be16))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        int err;

        switch (ctx->dep.protonum) {
        case IPPROTO_TCP:
                err = nft_payload_offload_tcp(ctx, flow, priv);
                break;
        case IPPROTO_UDP:
                err = nft_payload_offload_udp(ctx, flow, priv);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return err;
}

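/*
 * Offload dispatch: the base header selects which translator runs, and the
 * network and transport translators additionally consult the protocol
 * dependency (ctx->dep) recorded by earlier expressions in the rule, so
 * that, for example, a 2-byte load at transport offset 0 is only
 * interpreted as a TCP or UDP source port when the rule has already
 * established that protocol.
 */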
static int nft_payload_offload(struct nft_offload_ctx *ctx,
                               struct nft_flow_rule *flow,
                               const struct nft_expr *expr)
{
        const struct nft_payload *priv = nft_expr_priv(expr);
        int err;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                err = nft_payload_offload_ll(ctx, flow, priv);
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                err = nft_payload_offload_nh(ctx, flow, priv);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                err = nft_payload_offload_th(ctx, flow, priv);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }
        return err;
}

static const struct nft_expr_ops nft_payload_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
        .eval           = nft_payload_eval,
        .init           = nft_payload_init,
        .dump           = nft_payload_dump,
        .offload        = nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
        .eval           = nft_payload_eval,
        .init           = nft_payload_init,
        .dump           = nft_payload_dump,
        .offload        = nft_payload_offload,
};

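/*
 * Incremental checksum update in the style of RFC 1624: fold the checksum
 * of the bytes being replaced (fsum) out of the stored value and fold the
 * checksum of the new bytes (tsum) in, without summing the whole packet
 * again.  A result of zero is stored as CSUM_MANGLED_0 (0xffff), since an
 * all-zero checksum field means "no checksum" for UDP.
 */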
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
        *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
        if (*sum == 0)
                *sum = CSUM_MANGLED_0;
}

static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
        struct udphdr *uh, _uh;

        uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
        if (!uh)
                return false;

        return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
                                     struct sk_buff *skb,
                                     unsigned int *l4csum_offset)
{
        switch (pkt->tprot) {
        case IPPROTO_TCP:
                *l4csum_offset = offsetof(struct tcphdr, check);
                break;
        case IPPROTO_UDP:
                if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
                        return -1;
                /* Fall through. */
        case IPPROTO_UDPLITE:
                *l4csum_offset = offsetof(struct udphdr, check);
                break;
        case IPPROTO_ICMPV6:
                *l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
                break;
        default:
                return -1;
        }

        *l4csum_offset += pkt->xt.thoff;
        return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
                                     struct sk_buff *skb,
                                     __wsum fsum, __wsum tsum)
{
        int l4csum_offset;
        __sum16 sum;

        /* If we cannot determine layer 4 checksum offset or this packet doesn't
         * require layer 4 checksum recalculation, skip this packet.
         */
        if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
                return 0;

        if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        /* Checksum mangling for an arbitrary amount of bytes, based on
         * inet_proto_csum_replace*() functions.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                nft_csum_replace(&sum, fsum, tsum);
                if (skb->ip_summed == CHECKSUM_COMPLETE) {
                        skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
                                              tsum);
                }
        } else {
                sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
                                          tsum));
        }

        if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
            skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
                                 __wsum fsum, __wsum tsum, int csum_offset)
{
        __sum16 sum;

        if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        nft_csum_replace(&sum, fsum, tsum);
        if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
            skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        return 0;
}

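/*
 * Evaluation of a payload write (the payload statement).  Before the new
 * bytes are stored, the checksum of the old bytes (fsum) and of the new
 * bytes (tsum) is computed so that the inet checksum at csum_offset and,
 * when NFT_PAYLOAD_L4CSUM_PSEUDOHDR is set, the layer 4 checksum can be
 * adjusted incrementally.  CHECKSUM_PARTIAL packets whose transport header
 * is being rewritten skip the fixup, since the transport checksum is filled
 * in later by the stack or the device.
 */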
static void nft_payload_set_eval(const struct nft_expr *expr,
                                 struct nft_regs *regs,
                                 const struct nft_pktinfo *pkt)
{
        const struct nft_payload_set *priv = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
        const u32 *src = &regs->data[priv->sreg];
        int offset, csum_offset;
        __wsum fsum, tsum;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;
                offset = skb_mac_header(skb) - skb->data;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                offset = skb_network_offset(skb);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                if (!pkt->tprot_set)
                        goto err;
                offset = pkt->xt.thoff;
                break;
        default:
                BUG();
        }

        csum_offset = offset + priv->csum_offset;
        offset += priv->offset;

        if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
            (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
             skb->ip_summed != CHECKSUM_PARTIAL)) {
                fsum = skb_checksum(skb, offset, priv->len, 0);
                tsum = csum_partial(src, priv->len, 0);

                if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
                    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
                        goto err;

                if (priv->csum_flags &&
                    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
                        goto err;
        }

        if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
            skb_store_bits(skb, offset, src, priv->len) < 0)
                goto err;

        return;
err:
        regs->verdict.code = NFT_BREAK;
}

static int nft_payload_set_init(const struct nft_ctx *ctx,
                                const struct nft_expr *expr,
                                const struct nlattr * const tb[])
{
        struct nft_payload_set *priv = nft_expr_priv(expr);

        priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
        priv->sreg        = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

        if (tb[NFTA_PAYLOAD_CSUM_TYPE])
                priv->csum_type =
                        ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
        if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
                priv->csum_offset =
                        ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
        if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
                u32 flags;

                flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
                if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
                        return -EINVAL;

                priv->csum_flags = flags;
        }

        switch (priv->csum_type) {
        case NFT_PAYLOAD_CSUM_NONE:
        case NFT_PAYLOAD_CSUM_INET:
                break;
        default:
                return -EOPNOTSUPP;
        }

        return nft_validate_register_load(priv->sreg, priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
        const struct nft_payload_set *priv = nft_expr_priv(expr);

        if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
            nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
                         htonl(priv->csum_offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
        .eval           = nft_payload_set_eval,
        .init           = nft_payload_set_init,
        .dump           = nft_payload_set_dump,
};

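/*
 * Ops selection at rule creation time: a source register selects the
 * payload write ops, a destination register a load.  Small aligned loads
 * (1, 2 or 4 bytes at an offset aligned to the length) from the network or
 * transport header use nft_payload_fast_ops, which the nf_tables core can
 * evaluate through a dedicated fast path; link-layer loads always take the
 * generic path because they may need the VLAN header rebuild above.
 */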
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
                       const struct nlattr * const tb[])
{
        enum nft_payload_bases base;
        unsigned int offset, len;

        if (tb[NFTA_PAYLOAD_BASE] == NULL ||
            tb[NFTA_PAYLOAD_OFFSET] == NULL ||
            tb[NFTA_PAYLOAD_LEN] == NULL)
                return ERR_PTR(-EINVAL);

        base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        switch (base) {
        case NFT_PAYLOAD_LL_HEADER:
        case NFT_PAYLOAD_NETWORK_HEADER:
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                break;
        default:
                return ERR_PTR(-EOPNOTSUPP);
        }

        if (tb[NFTA_PAYLOAD_SREG] != NULL) {
                if (tb[NFTA_PAYLOAD_DREG] != NULL)
                        return ERR_PTR(-EINVAL);
                return &nft_payload_set_ops;
        }

        if (tb[NFTA_PAYLOAD_DREG] == NULL)
                return ERR_PTR(-EINVAL);

        offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

        if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
            base != NFT_PAYLOAD_LL_HEADER)
                return &nft_payload_fast_ops;
        else
                return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
        .name           = "payload",
        .select_ops     = nft_payload_select_ops,
        .policy         = nft_payload_policy,
        .maxattr        = NFTA_PAYLOAD_MAX,
        .owner          = THIS_MODULE,
};
