~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/xfrm/xfrm_device.c

Version: ~ [ linux-5.2-rc1 ] ~ [ linux-5.1.2 ] ~ [ linux-5.0.16 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.43 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.119 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.176 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.179 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.139 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.67 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.39.4 ] ~ [ linux-2.6.38.8 ] ~ [ linux-2.6.37.6 ] ~ [ linux-2.6.36.4 ] ~ [ linux-2.6.35.14 ] ~ [ linux-2.6.34.15 ] ~ [ linux-2.6.33.20 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * xfrm_device.c - IPsec device offloading code.
  3  *
  4  * Copyright (c) 2015 secunet Security Networks AG
  5  *
  6  * Author:
  7  * Steffen Klassert <steffen.klassert@secunet.com>
  8  *
  9  * This program is free software; you can redistribute it and/or
 10  * modify it under the terms of the GNU General Public License
 11  * as published by the Free Software Foundation; either version
 12  * 2 of the License, or (at your option) any later version.
 13  */
 14 
 15 #include <linux/errno.h>
 16 #include <linux/module.h>
 17 #include <linux/netdevice.h>
 18 #include <linux/skbuff.h>
 19 #include <linux/slab.h>
 20 #include <linux/spinlock.h>
 21 #include <net/dst.h>
 22 #include <net/xfrm.h>
 23 #include <linux/notifier.h>
 24 
 25 #ifdef CONFIG_XFRM_OFFLOAD
/* Prepare a transport-mode packet for layer-2 IPsec offload: advance
 * skb->data past the MAC header, the outer L3 header of size @hsize and
 * the state's IPsec header room (x->props.header_len), so the data
 * pointer sits at the payload the device will transform.
 */
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + hsize + x->props.header_len);

	if (xo->flags & XFRM_GSO_SEGMENT) {
		/* For GSO segments, point the transport header header_len
		 * bytes before the (just pulled) data pointer — i.e. at the
		 * start of the IPsec header room.
		 */
		skb_reset_transport_header(skb);
		skb->transport_header -= x->props.header_len;
	}
}
 39 
/* Prepare a tunnel-mode packet for layer-2 IPsec offload: pull
 * skb->data past the MAC header and the state's IPsec header room
 * (x->props.header_len).  @hsize is the outer IP header size (iphdr or
 * ipv6hdr), used only to fix up the transport header for GSO segments.
 */
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)

{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		/* Transport header starts right after the outer IP header. */
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}
 52 
 53 /* Adjust pointers into the packet when IPsec is done at layer2 */
 54 static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
 55 {
 56         switch (x->outer_mode.encap) {
 57         case XFRM_MODE_TUNNEL:
 58                 if (x->outer_mode.family == AF_INET)
 59                         return __xfrm_mode_tunnel_prep(x, skb,
 60                                                        sizeof(struct iphdr));
 61                 if (x->outer_mode.family == AF_INET6)
 62                         return __xfrm_mode_tunnel_prep(x, skb,
 63                                                        sizeof(struct ipv6hdr));
 64                 break;
 65         case XFRM_MODE_TRANSPORT:
 66                 if (x->outer_mode.family == AF_INET)
 67                         return __xfrm_transport_prep(x, skb,
 68                                                      sizeof(struct iphdr));
 69                 if (x->outer_mode.family == AF_INET6)
 70                         return __xfrm_transport_prep(x, skb,
 71                                                      sizeof(struct ipv6hdr));
 72                 break;
 73         case XFRM_MODE_ROUTEOPTIMIZATION:
 74         case XFRM_MODE_IN_TRIGGER:
 75         case XFRM_MODE_BEET:
 76                 break;
 77         }
 78 }
 79 
/* Perform the IPsec offload transformation for an outgoing skb just
 * before it is handed to the driver.
 *
 * Returns:
 *  - @skb untouched when offload does not apply (no xfrm offload
 *    context, GRO/inbound state) or when the per-cpu backlog must
 *    drain first (*@again is set so the caller retries later),
 *  - the transformed skb (or GSO segment list) on success,
 *  - NULL when the packet was dropped on error or handed to
 *    asynchronous crypto (-EINPROGRESS).
 */
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct sk_buff *skb2;
	struct softnet_data *sd;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return skb;

	/* Without HW ESP support, SG and checksum offload can't be used
	 * for this packet either.
	 */
	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	/* If the per-cpu backlog holds earlier packets awaiting resume,
	 * don't transform new ones now; ask the caller to retry.
	 */
	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(x->xso.dev != dev)) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				kfree_skb(skb);
				atomic_long_inc(&dev->tx_dropped);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		}
	}

	/* Fast path: a single, non-segmented skb. */
	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				/* Async crypto owns the skb now. */
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	/* GSO segment list: transform each segment individually. */
	skb2 = skb;

	do {
		struct sk_buff *nskb = skb2->next;

		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			/* Relink and free this segment plus the rest. */
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			/* Async crypto owns this segment: unlink it from
			 * the list we return to the caller.
			 */
			if (skb == skb2)
				skb = nskb;

			if (!skb)
				return NULL;

			goto skip_push;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
		skb2 = nskb;
	} while (skb2);

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
195 
/**
 * xfrm_dev_state_add - bind an xfrm state to a device for HW offload
 * @net: namespace the state lives in
 * @x: state to offload
 * @xuo: userspace offload request (ifindex and direction flags)
 *
 * Resolves the target device either by @xuo->ifindex or, failing that,
 * by routing the state's addresses, then asks the driver to install the
 * state via xdo_dev_state_add().
 *
 * Return: 0 on success or when offload is quietly not possible (no
 * route, no driver support, or the driver returned -EOPNOTSUPP — the
 * state then stays on the software path); negative errno otherwise.
 */
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		/* No explicit device: derive it from a route lookup.
		 * Inbound states swap the addresses so the lookup still
		 * resolves to the local device.
		 */
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			/* No route: no offload, but not a hard error. */
			return 0;

		dev = dst->dev;

		/* Keep a device reference past the dst release. */
		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	/* ESN states require the driver to support window advancing. */
	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		/* Undo the binding; -EOPNOTSUPP means SW fallback. */
		xso->num_exthdrs = 0;
		xso->flags = 0;
		xso->dev = NULL;
		dev_put(dev);

		if (err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
266 
267 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
268 {
269         int mtu;
270         struct dst_entry *dst = skb_dst(skb);
271         struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
272         struct net_device *dev = x->xso.dev;
273 
274         if (!x->type_offload || x->encap)
275                 return false;
276 
277         if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
278             (!xdst->child->xfrm && x->type->get_mtu)) {
279                 mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
280 
281                 if (skb->len <= mtu)
282                         goto ok;
283 
284                 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
285                         goto ok;
286         }
287 
288         return false;
289 
290 ok:
291         if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
292                 return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);
293 
294         return true;
295 }
296 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
297 
/* Resume transmission of an offloaded skb (e.g. after async crypto
 * completion — note the XFRM_DEV_RESUME flag set in
 * validate_xmit_xfrm()): try to hand it straight to the device queue;
 * if the queue is frozen/stopped or the xmit did not complete, park the
 * skb on the per-cpu xfrm backlog and kick the TX softirq to retry.
 */
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		/* Backlog is drained by xfrm_dev_backlog() from NET_TX. */
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);
324 
325 void xfrm_dev_backlog(struct softnet_data *sd)
326 {
327         struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
328         struct sk_buff_head list;
329         struct sk_buff *skb;
330 
331         if (skb_queue_empty(xfrm_backlog))
332                 return;
333 
334         __skb_queue_head_init(&list);
335 
336         spin_lock(&xfrm_backlog->lock);
337         skb_queue_splice_init(xfrm_backlog, &list);
338         spin_unlock(&xfrm_backlog->lock);
339 
340         while (!skb_queue_empty(&list)) {
341                 skb = __skb_dequeue(&list);
342                 xfrm_dev_resume(skb);
343         }
344 
345 }
346 #endif
347 
348 static int xfrm_api_check(struct net_device *dev)
349 {
350 #ifdef CONFIG_XFRM_OFFLOAD
351         if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
352             !(dev->features & NETIF_F_HW_ESP))
353                 return NOTIFY_BAD;
354 
355         if ((dev->features & NETIF_F_HW_ESP) &&
356             (!(dev->xfrmdev_ops &&
357                dev->xfrmdev_ops->xdo_dev_state_add &&
358                dev->xfrmdev_ops->xdo_dev_state_delete)))
359                 return NOTIFY_BAD;
360 #else
361         if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
362                 return NOTIFY_BAD;
363 #endif
364 
365         return NOTIFY_DONE;
366 }
367 
/* NETDEV_REGISTER handler: veto devices with an inconsistent ESP
 * offload advertisement.
 */
static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}
372 
/* NETDEV_FEAT_CHANGE handler: re-validate the ESP offload
 * advertisement after a feature change.
 */
static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}
377 
/* NETDEV_DOWN handler: flush xfrm states bound to a device that
 * advertised ESP offload, since the hardware context is going away.
 */
static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}
385 
386 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
387 {
388         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
389 
390         switch (event) {
391         case NETDEV_REGISTER:
392                 return xfrm_dev_register(dev);
393 
394         case NETDEV_FEAT_CHANGE:
395                 return xfrm_dev_feat_change(dev);
396 
397         case NETDEV_DOWN:
398                 return xfrm_dev_down(dev);
399         }
400         return NOTIFY_DONE;
401 }
402 
/* Notifier hooking xfrm_dev_event() into netdevice notifications. */
static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};
406 
/* Boot-time initialization: register the xfrm netdevice notifier. */
void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}
411 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp