Linux/tools/testing/selftests/bpf/map_tests/sk_storage_map.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/compiler.h>
#include <linux/err.h>

#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <linux/btf.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include <test_btf.h>
#include <test_maps.h>

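/*
 * Attributes of the BPF_MAP_TYPE_SK_STORAGE map under test.  The 4-byte key
 * is a socket fd and the 8-byte value matches "struct val" (an int counter
 * plus a struct bpf_spin_lock) described by the BTF built in load_btf().
 * btf_fd is filled in right before map creation.
 */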
static struct bpf_create_map_attr xattr = {
        .name = "sk_storage_map",
        .map_type = BPF_MAP_TYPE_SK_STORAGE,
        .map_flags = BPF_F_NO_PREALLOC,
        .max_entries = 0,
        .key_size = 4,
        .value_size = 8,
        .btf_key_type_id = 1,
        .btf_value_type_id = 3,
        .btf_fd = -1,
};

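/*
 * State shared between the main thread and the worker threads of the
 * stress tests.  Counters are updated with __sync_*() builtins; "stop"
 * and "sk_storage_map" are published with WRITE_ONCE()/READ_ONCE().
 */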
static unsigned int nr_sk_threads_done;
static unsigned int nr_sk_threads_err;
static unsigned int nr_sk_per_thread = 4096;
static unsigned int nr_sk_threads = 4;
static int sk_storage_map = -1;
static unsigned int stop;
static int runtime_s = 5;

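/*
 * Polling helpers coordinating the main thread and the workers: each
 * wait_for_*() spins (mostly with a short usleep()) until its condition
 * is met or the run is stopped.
 */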
static bool is_stopped(void)
{
        return READ_ONCE(stop);
}

static unsigned int threads_err(void)
{
        return READ_ONCE(nr_sk_threads_err);
}

static void notify_thread_err(void)
{
        __sync_add_and_fetch(&nr_sk_threads_err, 1);
}

static bool wait_for_threads_err(void)
{
        while (!is_stopped() && !threads_err())
                usleep(500);

        return !is_stopped();
}

static unsigned int threads_done(void)
{
        return READ_ONCE(nr_sk_threads_done);
}

static void notify_thread_done(void)
{
        __sync_add_and_fetch(&nr_sk_threads_done, 1);
}

static void notify_thread_redo(void)
{
        __sync_sub_and_fetch(&nr_sk_threads_done, 1);
}

static bool wait_for_threads_done(void)
{
        while (threads_done() != nr_sk_threads && !is_stopped() &&
               !threads_err())
                usleep(50);

        return !is_stopped() && !threads_err();
}

static bool wait_for_threads_redo(void)
{
        while (threads_done() && !is_stopped() && !threads_err())
                usleep(50);

        return !is_stopped() && !threads_err();
}

static bool wait_for_map(void)
{
        while (READ_ONCE(sk_storage_map) == -1 && !is_stopped())
                usleep(50);

        return !is_stopped();
}

static bool wait_for_map_close(void)
{
        while (READ_ONCE(sk_storage_map) != -1 && !is_stopped())
                ;

        return !is_stopped();
}

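/*
 * Build a minimal in-memory BTF blob describing:
 *   [1] int
 *   [2] struct bpf_spin_lock { int val; }
 *   [3] struct val { int cnt; struct bpf_spin_lock l; }
 * and load it into the kernel.  Type id 1 is used as the map's key type
 * and type id 3 as its value type (see xattr above).
 */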
static int load_btf(void)
{
        const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
        __u32 btf_raw_types[] = {
                /* int */
                BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
                /* struct bpf_spin_lock */                      /* [2] */
                BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
                BTF_MEMBER_ENC(15, 1, 0), /* int val; */
                /* struct val */                                /* [3] */
                BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
                BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
                BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
        };
        struct btf_header btf_hdr = {
                .magic = BTF_MAGIC,
                .version = BTF_VERSION,
                .hdr_len = sizeof(struct btf_header),
                .type_len = sizeof(btf_raw_types),
                .str_off = sizeof(btf_raw_types),
                .str_len = sizeof(btf_str_sec),
        };
        __u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
                     sizeof(btf_str_sec)];

        memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
        memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
        memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
               btf_str_sec, sizeof(btf_str_sec));

        return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0);
}

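/* Load the test BTF and create the sk_storage map described by xattr. */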
static int create_sk_storage_map(void)
{
        int btf_fd, map_fd;

        btf_fd = load_btf();
        CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
              btf_fd, errno);
        xattr.btf_fd = btf_fd;

        map_fd = bpf_create_map_xattr(&xattr);
        xattr.btf_fd = -1;
        close(btf_fd);
        CHECK(map_fd == -1,
              "bpf_create_map_xattr()", "errno:%d\n", errno);

        return map_fd;
}

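/*
 * Worker thread for the "stress_free" test: wait for the main thread to
 * publish a map fd, create nr_sk_per_thread sockets and add one storage
 * element per socket, then close all the sockets once the main thread has
 * closed the map.  Repeats until stopped.
 */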
static void *insert_close_thread(void *arg)
{
        struct {
                int cnt;
                int lock;
        } value = { .cnt = 0xeB9F, .lock = 0, };
        int i, map_fd, err, *sk_fds;

        sk_fds = malloc(sizeof(*sk_fds) * nr_sk_per_thread);
        if (!sk_fds) {
                notify_thread_err();
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < nr_sk_per_thread; i++)
                sk_fds[i] = -1;

        while (!is_stopped()) {
                if (!wait_for_map())
                        goto close_all;

                map_fd = READ_ONCE(sk_storage_map);
                for (i = 0; i < nr_sk_per_thread && !is_stopped(); i++) {
                        sk_fds[i] = socket(AF_INET6, SOCK_STREAM, 0);
                        if (sk_fds[i] == -1) {
                                err = -errno;
                                fprintf(stderr, "socket(): errno:%d\n", errno);
                                goto errout;
                        }
                        err = bpf_map_update_elem(map_fd, &sk_fds[i], &value,
                                                  BPF_NOEXIST);
                        if (err) {
                                err = -errno;
                                fprintf(stderr,
                                        "bpf_map_update_elem(): errno:%d\n",
                                        errno);
                                goto errout;
                        }
                }

                notify_thread_done();
                wait_for_map_close();

close_all:
                for (i = 0; i < nr_sk_per_thread; i++) {
                        close(sk_fds[i]);
                        sk_fds[i] = -1;
                }

                notify_thread_redo();
        }

        free(sk_fds);
        return NULL;

errout:
        for (i = 0; i < nr_sk_per_thread && sk_fds[i] != -1; i++)
                close(sk_fds[i]);
        free(sk_fds);
        notify_thread_err();
        return ERR_PTR(err);
}

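/*
 * "stress_free" driver: repeatedly create a fresh sk_storage map, publish
 * it to the workers, wait until every worker has filled it with per-socket
 * storage, then close the map and let the workers close their sockets
 * before the next round.
 */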
static int do_sk_storage_map_stress_free(void)
{
        int i, map_fd = -1, err = 0, nr_threads_created = 0;
        pthread_t *sk_thread_ids;
        void *thread_ret;

        sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
        if (!sk_thread_ids) {
                fprintf(stderr, "malloc(sk_threads): NULL\n");
                return -ENOMEM;
        }

        for (i = 0; i < nr_sk_threads; i++) {
                err = pthread_create(&sk_thread_ids[i], NULL,
                                     insert_close_thread, NULL);
                if (err) {
                        err = -errno;
                        goto done;
                }
                nr_threads_created++;
        }

        while (!is_stopped()) {
                map_fd = create_sk_storage_map();
                WRITE_ONCE(sk_storage_map, map_fd);

                if (!wait_for_threads_done())
                        break;

                WRITE_ONCE(sk_storage_map, -1);
                close(map_fd);
                map_fd = -1;

                if (!wait_for_threads_redo())
                        break;
        }

done:
        WRITE_ONCE(stop, 1);
        for (i = 0; i < nr_threads_created; i++) {
                pthread_join(sk_thread_ids[i], &thread_ret);
                if (IS_ERR(thread_ret) && !err) {
                        err = PTR_ERR(thread_ret);
                        fprintf(stderr, "threads#%u: err:%d\n", i, err);
                }
        }
        free(sk_thread_ids);

        if (map_fd != -1)
                close(map_fd);

        return err;
}

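/*
 * Worker for the "stress_change" test: keep updating one socket's storage
 * element, retrying on EAGAIN.
 */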
static void *update_thread(void *arg)
{
        struct {
                int cnt;
                int lock;
        } value = { .cnt = 0xeB9F, .lock = 0, };
        int map_fd = READ_ONCE(sk_storage_map);
        int sk_fd = *(int *)arg;
        int err = 0; /* Suppress compiler false alarm */

        while (!is_stopped()) {
                err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
                if (err && errno != EAGAIN) {
                        err = -errno;
                        fprintf(stderr, "bpf_map_update_elem: %d %d\n",
                                err, errno);
                        break;
                }
        }

        if (!is_stopped()) {
                notify_thread_err();
                return ERR_PTR(err);
        }

        return NULL;
}

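/*
 * Worker for the "stress_change" test: keep deleting the same socket's
 * storage element, ignoring ENOENT.
 */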
static void *delete_thread(void *arg)
{
        int map_fd = READ_ONCE(sk_storage_map);
        int sk_fd = *(int *)arg;
        int err = 0; /* Suppress compiler false alarm */

        while (!is_stopped()) {
                err = bpf_map_delete_elem(map_fd, &sk_fd);
                if (err && errno != ENOENT) {
                        err = -errno;
                        fprintf(stderr, "bpf_map_delete_elem: %d %d\n",
                                err, errno);
                        break;
                }
        }

        if (!is_stopped()) {
                notify_thread_err();
                return ERR_PTR(err);
        }

        return NULL;
}

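/*
 * "stress_change" driver: one socket, one map, and nr_sk_threads threads
 * racing updates (odd-indexed threads) against deletes (even-indexed
 * threads) on the same storage element until the runtime expires or a
 * thread reports an error.
 */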
static int do_sk_storage_map_stress_change(void)
{
        int i, sk_fd, map_fd = -1, err = 0, nr_threads_created = 0;
        pthread_t *sk_thread_ids;
        void *thread_ret;

        sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
        if (!sk_thread_ids) {
                fprintf(stderr, "malloc(sk_threads): NULL\n");
                return -ENOMEM;
        }

        sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
        if (sk_fd == -1) {
                err = -errno;
                goto done;
        }

        map_fd = create_sk_storage_map();
        WRITE_ONCE(sk_storage_map, map_fd);

        for (i = 0; i < nr_sk_threads; i++) {
                if (i & 0x1)
                        err = pthread_create(&sk_thread_ids[i], NULL,
                                             update_thread, &sk_fd);
                else
                        err = pthread_create(&sk_thread_ids[i], NULL,
                                             delete_thread, &sk_fd);
                if (err) {
                        err = -errno;
                        goto done;
                }
                nr_threads_created++;
        }

        wait_for_threads_err();

done:
        WRITE_ONCE(stop, 1);
        for (i = 0; i < nr_threads_created; i++) {
                pthread_join(sk_thread_ids[i], &thread_ret);
                if (IS_ERR(thread_ret) && !err) {
                        err = PTR_ERR(thread_ret);
                        fprintf(stderr, "threads#%u: err:%d\n", i, err);
                }
        }
        free(sk_thread_ids);

        if (sk_fd != -1)
                close(sk_fd);
        close(map_fd);

        return err;
}

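/*
 * SIGTERM/SIGINT/SIGALRM handler: ask all loops to stop.  SIGALRM fires
 * when runtime_s expires.
 */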
static void stop_handler(int signum)
{
        if (signum != SIGALRM)
                printf("stopping...\n");
        WRITE_ONCE(stop, 1);
}

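/*
 * Environment variables that override the default test parameters
 * (thread count, sockets per thread, runtime) and select a sub-test.
 */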
#define BPF_SK_STORAGE_MAP_TEST_NR_THREADS "BPF_SK_STORAGE_MAP_TEST_NR_THREADS"
#define BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD "BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD"
#define BPF_SK_STORAGE_MAP_TEST_RUNTIME_S "BPF_SK_STORAGE_MAP_TEST_RUNTIME_S"
#define BPF_SK_STORAGE_MAP_TEST_NAME "BPF_SK_STORAGE_MAP_TEST_NAME"

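/*
 * Wrapper for the "stress_free" test: install signal handlers, raise
 * RLIMIT_NOFILE if the current soft limit cannot hold
 * nr_sk_threads * nr_sk_per_thread sockets, run the stress loop for
 * runtime_s seconds, then restore the old state.
 */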
static void test_sk_storage_map_stress_free(void)
{
        struct rlimit rlim_old, rlim_new = {};
        int err;

        getrlimit(RLIMIT_NOFILE, &rlim_old);

        signal(SIGTERM, stop_handler);
        signal(SIGINT, stop_handler);
        if (runtime_s > 0) {
                signal(SIGALRM, stop_handler);
                alarm(runtime_s);
        }

        if (rlim_old.rlim_cur < nr_sk_threads * nr_sk_per_thread) {
                rlim_new.rlim_cur = nr_sk_threads * nr_sk_per_thread + 128;
                rlim_new.rlim_max = rlim_new.rlim_cur + 128;
                err = setrlimit(RLIMIT_NOFILE, &rlim_new);
                CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d",
                      rlim_new.rlim_cur, errno);
        }

        err = do_sk_storage_map_stress_free();

        signal(SIGTERM, SIG_DFL);
        signal(SIGINT, SIG_DFL);
        if (runtime_s > 0) {
                signal(SIGALRM, SIG_DFL);
                alarm(0);
        }

        if (rlim_new.rlim_cur)
                setrlimit(RLIMIT_NOFILE, &rlim_old);

        CHECK(err, "test_sk_storage_map_stress_free", "err:%d\n", err);
}

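/*
 * Wrapper for the "stress_change" test: install signal handlers, run the
 * update/delete race for runtime_s seconds, then restore the handlers.
 */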
static void test_sk_storage_map_stress_change(void)
{
        int err;

        signal(SIGTERM, stop_handler);
        signal(SIGINT, stop_handler);
        if (runtime_s > 0) {
                signal(SIGALRM, stop_handler);
                alarm(runtime_s);
        }

        err = do_sk_storage_map_stress_change();

        signal(SIGTERM, SIG_DFL);
        signal(SIGINT, SIG_DFL);
        if (runtime_s > 0) {
                signal(SIGALRM, SIG_DFL);
                alarm(0);
        }

        CHECK(err, "test_sk_storage_map_stress_change", "err:%d\n", err);
}

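/*
 * Basic API coverage on a single socket: create/update/lookup/delete with
 * the BPF_NOEXIST/BPF_EXIST/BPF_F_LOCK flag combinations, plus a few map
 * creation attempts with invalid attributes that must fail with EINVAL.
 */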
static void test_sk_storage_map_basic(void)
{
        struct {
                int cnt;
                int lock;
        } value = { .cnt = 0xeB9f, .lock = 0, }, lookup_value;
        struct bpf_create_map_attr bad_xattr;
        int btf_fd, map_fd, sk_fd, err;

        btf_fd = load_btf();
        CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
              btf_fd, errno);
        xattr.btf_fd = btf_fd;

        sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
        CHECK(sk_fd == -1, "socket()", "sk_fd:%d errno:%d\n",
              sk_fd, errno);

        map_fd = bpf_create_map_xattr(&xattr);
        CHECK(map_fd == -1, "bpf_create_map_xattr(good_xattr)",
              "map_fd:%d errno:%d\n", map_fd, errno);

        /* Add new elem */
        memcpy(&lookup_value, &value, sizeof(value));
        err = bpf_map_update_elem(map_fd, &sk_fd, &value,
                                  BPF_NOEXIST | BPF_F_LOCK);
        CHECK(err, "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
              "err:%d errno:%d\n", err, errno);
        err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                                        BPF_F_LOCK);
        CHECK(err || lookup_value.cnt != value.cnt,
              "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
              "err:%d errno:%d cnt:%x(%x)\n",
              err, errno, lookup_value.cnt, value.cnt);

        /* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
        value.cnt += 1;
        err = bpf_map_update_elem(map_fd, &sk_fd, &value,
                                  BPF_EXIST | BPF_F_LOCK);
        CHECK(err, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
              "err:%d errno:%d\n", err, errno);
        err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                                        BPF_F_LOCK);
        CHECK(err || lookup_value.cnt != value.cnt,
              "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
              "err:%d errno:%d cnt:%x(%x)\n",
              err, errno, lookup_value.cnt, value.cnt);

        /* Bump the cnt and update with BPF_EXIST */
        value.cnt += 1;
        err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_EXIST);
        CHECK(err, "bpf_map_update_elem(BPF_EXIST)",
              "err:%d errno:%d\n", err, errno);
        err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                                        BPF_F_LOCK);
        CHECK(err || lookup_value.cnt != value.cnt,
              "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
              "err:%d errno:%d cnt:%x(%x)\n",
              err, errno, lookup_value.cnt, value.cnt);

        /* Update with BPF_NOEXIST */
        value.cnt += 1;
        err = bpf_map_update_elem(map_fd, &sk_fd, &value,
                                  BPF_NOEXIST | BPF_F_LOCK);
        CHECK(!err || errno != EEXIST,
              "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
              "err:%d errno:%d\n", err, errno);
        err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_NOEXIST);
        CHECK(!err || errno != EEXIST, "bpf_map_update_elem(BPF_NOEXIST)",
              "err:%d errno:%d\n", err, errno);
        value.cnt -= 1;
        err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                                        BPF_F_LOCK);
        CHECK(err || lookup_value.cnt != value.cnt,
              "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
              "err:%d errno:%d cnt:%x(%x)\n",
              err, errno, lookup_value.cnt, value.cnt);

        /* Bump the cnt again and update with map_flags == 0 */
        value.cnt += 1;
        err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
        CHECK(err, "bpf_map_update_elem()", "err:%d errno:%d\n",
              err, errno);
        err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                                        BPF_F_LOCK);
        CHECK(err || lookup_value.cnt != value.cnt,
              "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
              "err:%d errno:%d cnt:%x(%x)\n",
              err, errno, lookup_value.cnt, value.cnt);

        /* Test delete elem */
        err = bpf_map_delete_elem(map_fd, &sk_fd);
        CHECK(err, "bpf_map_delete_elem()", "err:%d errno:%d\n",
              err, errno);
        err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                                        BPF_F_LOCK);
        CHECK(!err || errno != ENOENT,
              "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
              "err:%d errno:%d\n", err, errno);
        err = bpf_map_delete_elem(map_fd, &sk_fd);
        CHECK(!err || errno != ENOENT, "bpf_map_delete_elem()",
              "err:%d errno:%d\n", err, errno);

        memcpy(&bad_xattr, &xattr, sizeof(xattr));
        bad_xattr.btf_key_type_id = 0;
        err = bpf_create_map_xattr(&bad_xattr);
        CHECK(!err || errno != EINVAL, "bpf_create_map_xattr(bad_xattr)",
              "err:%d errno:%d\n", err, errno);

        memcpy(&bad_xattr, &xattr, sizeof(xattr));
        bad_xattr.btf_key_type_id = 3;
        err = bpf_create_map_xattr(&bad_xattr);
        CHECK(!err || errno != EINVAL, "bpf_create_map_xattr(bad_xattr)",
              "err:%d errno:%d\n", err, errno);

        memcpy(&bad_xattr, &xattr, sizeof(xattr));
        bad_xattr.max_entries = 1;
        err = bpf_create_map_xattr(&bad_xattr);
        CHECK(!err || errno != EINVAL, "bpf_create_map_xattr(bad_xattr)",
              "err:%d errno:%d\n", err, errno);

        memcpy(&bad_xattr, &xattr, sizeof(xattr));
        bad_xattr.map_flags = 0;
        err = bpf_create_map_xattr(&bad_xattr);
        CHECK(!err || errno != EINVAL, "bpf_create_map_xattr(bad_xattr)",
              "err:%d errno:%d\n", err, errno);

        xattr.btf_fd = -1;
        close(btf_fd);
        close(map_fd);
        close(sk_fd);
}

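/*
 * Entry point called by the test_maps runner.  Test parameters and the
 * sub-test selection can be overridden from the environment, e.g. (an
 * illustrative invocation; the runner binary name may differ per tree):
 *
 *   BPF_SK_STORAGE_MAP_TEST_NAME=stress_free \
 *   BPF_SK_STORAGE_MAP_TEST_NR_THREADS=8 \
 *   BPF_SK_STORAGE_MAP_TEST_RUNTIME_S=10 ./test_maps
 */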
void test_sk_storage_map(void)
{
        const char *test_name, *env_opt;
        bool test_ran = false;

        test_name = getenv(BPF_SK_STORAGE_MAP_TEST_NAME);

        env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_NR_THREADS);
        if (env_opt)
                nr_sk_threads = atoi(env_opt);

        env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD);
        if (env_opt)
                nr_sk_per_thread = atoi(env_opt);

        env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_RUNTIME_S);
        if (env_opt)
                runtime_s = atoi(env_opt);

        if (!test_name || !strcmp(test_name, "basic")) {
                test_sk_storage_map_basic();
                test_ran = true;
        }
        if (!test_name || !strcmp(test_name, "stress_free")) {
                test_sk_storage_map_stress_free();
                test_ran = true;
        }
        if (!test_name || !strcmp(test_name, "stress_change")) {
                test_sk_storage_map_stress_change();
                test_ran = true;
        }

        if (test_ran)
                printf("%s:PASS\n", __func__);
        else
                CHECK(1, "Invalid test_name", "%s\n", test_name);
}
