Linux/net/bpf/test_run.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
                        u32 *retval, u32 *time)
{
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
        enum bpf_cgroup_storage_type stype;
        u64 time_start, time_spent = 0;
        int ret = 0;
        u32 i;

        for_each_cgroup_storage_type(stype) {
                storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
                if (IS_ERR(storage[stype])) {
                        storage[stype] = NULL;
                        for_each_cgroup_storage_type(stype)
                                bpf_cgroup_storage_free(storage[stype]);
                        return -ENOMEM;
                }
        }

        if (!repeat)
                repeat = 1;

        rcu_read_lock();
        preempt_disable();
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
                bpf_cgroup_storage_set(storage);
                *retval = BPF_PROG_RUN(prog, ctx);

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (need_resched()) {
                        time_spent += ktime_get_ns() - time_start;
                        preempt_enable();
                        rcu_read_unlock();

                        cond_resched();

                        rcu_read_lock();
                        preempt_disable();
                        time_start = ktime_get_ns();
                }
        }
        time_spent += ktime_get_ns() - time_start;
        preempt_enable();
        rcu_read_unlock();

        do_div(time_spent, repeat);
        *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storage[stype]);

        return ret;
}
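
/*
 * Note on the reported time: time_spent is divided by repeat above, so the
 * value handed back to userspace is the mean wall-clock cost of one program
 * run in nanoseconds, clamped to U32_MAX. For example, repeat = 1000 with a
 * measured total of 5,000,000 ns yields *time == 5000.
 */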

static int bpf_test_finish(const union bpf_attr *kattr,
                           union bpf_attr __user *uattr, const void *data,
                           u32 size, u32 retval, u32 duration)
{
        void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
        int err = -EFAULT;
        u32 copy_size = size;

        /* Clamp the copy if the user has provided a size hint; otherwise
         * copy the full buffer to retain the old behaviour.
         */
        if (kattr->test.data_size_out &&
            copy_size > kattr->test.data_size_out) {
                copy_size = kattr->test.data_size_out;
                err = -ENOSPC;
        }

        if (data_out && copy_to_user(data_out, data, copy_size))
                goto out;
        if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
                goto out;
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
                goto out;
        if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
                goto out;
        if (err != -ENOSPC)
                err = 0;
out:
        trace_bpf_test_finish(&err);
        return err;
}
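
/*
 * A minimal userspace sketch (not part of this file) of how the clamp above
 * surfaces to callers: when data_size_out names a buffer smaller than the
 * produced packet, the syscall copies a truncated packet, still reports the
 * full size, and fails with ENOSPC. The wrapper run_prog_once() and the
 * 256-byte buffer are illustrative assumptions.
 *
 *	#include <errno.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int run_prog_once(int prog_fd, void *pkt, __u32 pkt_len)
 *	{
 *		unsigned char out[256];
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.test.prog_fd = prog_fd;
 *		attr.test.data_in = (__u64)(unsigned long)pkt;
 *		attr.test.data_size_in = pkt_len;
 *		attr.test.data_out = (__u64)(unsigned long)out;
 *		attr.test.data_size_out = sizeof(out);	// size hint: clamp point
 *
 *		if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) < 0)
 *			return -errno;	// -ENOSPC if out[] was too small
 *		return attr.test.retval;
 *	}
 */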

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. Seven or more arguments
 * can be supported in the future.
 */
int noinline bpf_fentry_test1(int a)
{
        return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
        return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
        return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
        return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
        return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
        return a + (long)b + c + d + (long)e + f;
}
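
/*
 * These helpers are also handy attach targets for fentry/fexit programs in
 * the BPF selftests. A minimal BPF-side sketch, loosely modelled on those
 * selftests (the global variable test1_result is an illustrative assumption,
 * as is relying on the BPF_PROG() convenience macro from libbpf's
 * bpf_tracing.h):
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	__u64 test1_result = 0;
 *
 *	SEC("fentry/bpf_fentry_test1")
 *	int BPF_PROG(test1, int a)
 *	{
 *		test1_result = (a == 1);	// saw the expected argument?
 *		return 0;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */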

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
                           u32 headroom, u32 tailroom)
{
        void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
        void *data;

        if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
                return ERR_PTR(-EINVAL);

        data = kzalloc(size + headroom + tailroom, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(data + headroom, data_in, size)) {
                kfree(data);
                return ERR_PTR(-EFAULT);
        }
        /* Sanity-check the fentry test functions defined above: each must
         * still return its expected value.
         */
        if (bpf_fentry_test1(1) != 2 ||
            bpf_fentry_test2(2, 3) != 5 ||
            bpf_fentry_test3(4, 5, 6) != 15 ||
            bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
            bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
            bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111) {
                kfree(data);
                return ERR_PTR(-EFAULT);
        }
        return data;
}

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
        void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
        void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
        u32 size = kattr->test.ctx_size_in;
        void *data;
        int err;

        if (!data_in && !data_out)
                return NULL;

        data = kzalloc(max_size, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);

        if (data_in) {
                err = bpf_check_uarg_tail_zero(data_in, max_size, size);
                if (err) {
                        kfree(data);
                        return ERR_PTR(err);
                }

                size = min_t(u32, max_size, size);
                if (copy_from_user(data, data_in, size)) {
                        kfree(data);
                        return ERR_PTR(-EFAULT);
                }
        }
        return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
                          union bpf_attr __user *uattr, const void *data,
                          u32 size)
{
        void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
        int err = -EFAULT;
        u32 copy_size = size;

        if (!data || !data_out)
                return 0;

        if (copy_size > kattr->test.ctx_size_out) {
                copy_size = kattr->test.ctx_size_out;
                err = -ENOSPC;
        }

        if (copy_to_user(data_out, data, copy_size))
                goto out;
        if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
                goto out;
        if (err != -ENOSPC)
                err = 0;
out:
        return err;
}

/**
 * range_is_zero - test whether a buffer range is zeroed
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte in buf in the
 * range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
        return !memchr_inv((u8 *)buf + from, 0, to - from);
}
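
/*
 * Worked example: with u8 buf[4] = { 0, 0, 1, 0 }, range_is_zero(buf, 0, 2)
 * is true but range_is_zero(buf, 0, 4) is false, since memchr_inv() returns
 * the first byte that differs from 0 (or NULL when none does).
 */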

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
        struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

        if (!__skb)
                return 0;

        /* make sure the fields we don't use are zeroed */
        if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
                return -EINVAL;

        /* priority is allowed */

        if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
                           sizeof_field(struct __sk_buff, priority),
                           offsetof(struct __sk_buff, cb)))
                return -EINVAL;

        /* cb is allowed */

        if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
                           sizeof_field(struct __sk_buff, cb),
                           offsetof(struct __sk_buff, tstamp)))
                return -EINVAL;

        /* tstamp is allowed */

        if (!range_is_zero(__skb, offsetof(struct __sk_buff, tstamp) +
                           sizeof_field(struct __sk_buff, tstamp),
                           sizeof(struct __sk_buff)))
                return -EINVAL;

        skb->priority = __skb->priority;
        skb->tstamp = __skb->tstamp;
        memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

        return 0;
}
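
/*
 * A userspace sketch (illustrative, not part of this file) of a ctx_in that
 * passes the checks above: only priority, cb[] and tstamp may be non-zero;
 * any other __sk_buff field left non-zero makes the test run fail with
 * -EINVAL. The variable names are assumptions.
 *
 *	struct __sk_buff ctx = {};	// unused fields must stay zeroed
 *
 *	ctx.priority = 7;
 *	ctx.cb[0] = 1;			// lands in qdisc_skb_cb::data
 *	ctx.tstamp = 1000;
 *
 *	attr.test.ctx_in = (__u64)(unsigned long)&ctx;
 *	attr.test.ctx_size_in = sizeof(ctx);
 */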

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
        struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

        if (!__skb)
                return;

        __skb->priority = skb->priority;
        __skb->tstamp = skb->tstamp;
        memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
}

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        bool is_l2 = false, is_direct_pkt_access = false;
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        struct __sk_buff *ctx = NULL;
        u32 retval, duration;
        int hh_len = ETH_HLEN;
        struct sk_buff *skb;
        struct sock *sk;
        void *data;
        int ret;

        data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
        if (IS_ERR(data))
                return PTR_ERR(data);

        ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
        if (IS_ERR(ctx)) {
                kfree(data);
                return PTR_ERR(ctx);
        }

        switch (prog->type) {
        case BPF_PROG_TYPE_SCHED_CLS:
        case BPF_PROG_TYPE_SCHED_ACT:
                is_l2 = true;
                /* fall through */
        case BPF_PROG_TYPE_LWT_IN:
        case BPF_PROG_TYPE_LWT_OUT:
        case BPF_PROG_TYPE_LWT_XMIT:
                is_direct_pkt_access = true;
                break;
        default:
                break;
        }

        sk = kzalloc(sizeof(struct sock), GFP_USER);
        if (!sk) {
                kfree(data);
                kfree(ctx);
                return -ENOMEM;
        }
        sock_net_set(sk, current->nsproxy->net_ns);
        sock_init_data(NULL, sk);

        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
                kfree(ctx);
                kfree(sk);
                return -ENOMEM;
        }
        skb->sk = sk;

        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        __skb_put(skb, size);
        skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
        skb_reset_network_header(skb);

        if (is_l2)
                __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
        ret = convert___skb_to_skb(skb, ctx);
        if (ret)
                goto out;
        ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
        if (ret)
                goto out;
        if (!is_l2) {
                if (skb_headroom(skb) < hh_len) {
                        int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

                        if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                }
                memset(__skb_push(skb, hh_len), 0, hh_len);
        }
        convert_skb_to___skb(skb, ctx);

        size = skb->len;
        /* A BPF program can never convert a linear skb into a non-linear one. */
        if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
                size = skb_headlen(skb);
        ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
        if (!ret)
                ret = bpf_ctx_finish(kattr, uattr, ctx,
                                     sizeof(struct __sk_buff));
out:
        kfree_skb(skb);
        bpf_sk_storage_free(sk);
        kfree(sk);
        kfree(ctx);
        return ret;
}
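
/*
 * An equivalent libbpf-based sketch for driving this skb runner (using the
 * bpf_prog_test_run() wrapper as found in libbpf of this era; prog_fd and
 * the zero-filled test frame are illustrative assumptions):
 *
 *	#include <bpf/bpf.h>
 *
 *	unsigned char pkt[64] = {};	// Ethernet frame, >= ETH_HLEN bytes
 *	unsigned char out[128];
 *	__u32 out_size = sizeof(out), retval, duration;
 *	int err;
 *
 *	err = bpf_prog_test_run(prog_fd, 1, pkt, sizeof(pkt),
 *				out, &out_size, &retval, &duration);
 *	// on success, retval holds the program's return code and
 *	// duration the mean runtime in nanoseconds
 */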

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        struct netdev_rx_queue *rxqueue;
        struct xdp_buff xdp = {};
        u32 retval, duration;
        void *data;
        int ret;

        if (kattr->test.ctx_in || kattr->test.ctx_out)
                return -EINVAL;

        data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
        if (IS_ERR(data))
                return PTR_ERR(data);

        xdp.data_hard_start = data;
        xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
        xdp.data_meta = xdp.data;
        xdp.data_end = xdp.data + size;

        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
        xdp.rxq = &rxqueue->xdp_rxq;

        ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
        if (ret)
                goto out;
        if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
            xdp.data_end != xdp.data + size)
                size = xdp.data_end - xdp.data;
        ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
        kfree(data);
        return ret;
}
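
/*
 * XDP sketch: unlike the skb runner, this one rejects ctx_in/ctx_out (see
 * the -EINVAL check above), so only packet data flows in and out. Reusing
 * the hypothetical variables from the previous sketch:
 *
 *	err = bpf_prog_test_run(prog_fd, 1, pkt, sizeof(pkt),
 *				out, &out_size, &retval, &duration);
 *	// retval carries the XDP verdict, e.g. XDP_PASS or XDP_DROP
 */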

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
        /* make sure the fields we don't use are zeroed */
        if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
                return -EINVAL;

        /* flags is allowed */

        if (!range_is_zero(ctx, offsetof(struct bpf_flow_keys, flags) +
                           sizeof_field(struct bpf_flow_keys, flags),
                           sizeof(struct bpf_flow_keys)))
                return -EINVAL;

        return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr)
{
        u32 size = kattr->test.data_size_in;
        struct bpf_flow_dissector ctx = {};
        u32 repeat = kattr->test.repeat;
        struct bpf_flow_keys *user_ctx;
        struct bpf_flow_keys flow_keys;
        u64 time_start, time_spent = 0;
        const struct ethhdr *eth;
        unsigned int flags = 0;
        u32 retval, duration;
        void *data;
        int ret;
        u32 i;

        if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
                return -EINVAL;

        if (size < ETH_HLEN)
                return -EINVAL;

        data = bpf_test_init(kattr, size, 0, 0);
        if (IS_ERR(data))
                return PTR_ERR(data);

        eth = (struct ethhdr *)data;

        if (!repeat)
                repeat = 1;

        user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
        if (IS_ERR(user_ctx)) {
                kfree(data);
                return PTR_ERR(user_ctx);
        }
        if (user_ctx) {
                ret = verify_user_bpf_flow_keys(user_ctx);
                if (ret)
                        goto out;
                flags = user_ctx->flags;
        }

        ctx.flow_keys = &flow_keys;
        ctx.data = data;
        ctx.data_end = (__u8 *)data + size;

        rcu_read_lock();
        preempt_disable();
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
                retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
                                          size, flags);

                if (signal_pending(current)) {
                        preempt_enable();
                        rcu_read_unlock();

                        ret = -EINTR;
                        goto out;
                }

                if (need_resched()) {
                        time_spent += ktime_get_ns() - time_start;
                        preempt_enable();
                        rcu_read_unlock();

                        cond_resched();

                        rcu_read_lock();
                        preempt_disable();
                        time_start = ktime_get_ns();
                }
        }
        time_spent += ktime_get_ns() - time_start;
        preempt_enable();
        rcu_read_unlock();

        do_div(time_spent, repeat);
        duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

        ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
                              retval, duration);
        if (!ret)
                ret = bpf_ctx_finish(kattr, uattr, user_ctx,
                                     sizeof(struct bpf_flow_keys));

out:
        kfree(user_ctx);
        kfree(data);
        return ret;
}
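
/*
 * Flow-dissector sketch: here data_out receives the struct bpf_flow_keys
 * filled in by the program rather than a packet, and flags may be passed in
 * through an optional bpf_flow_keys ctx whose remaining fields must be zero
 * (enforced by verify_user_bpf_flow_keys() above). The attr setup below is
 * an illustrative assumption:
 *
 *	struct bpf_flow_keys keys_in = {}, keys_out = {};
 *
 *	keys_in.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;	// optional
 *	attr.test.ctx_in = (__u64)(unsigned long)&keys_in;
 *	attr.test.ctx_size_in = sizeof(keys_in);
 *	attr.test.data_out = (__u64)(unsigned long)&keys_out;
 *	attr.test.data_size_out = sizeof(keys_out);
 */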