Linux/tools/bpf/bpftool/cfg.c


// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
 * Copyright (C) 2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the complete
 * terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/list.h>
#include <stdlib.h>
#include <string.h>

#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"

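/* Control flow graph for one eBPF program: the functions found in the
 * instruction stream, kept sorted by start address, plus their count.
 */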
struct cfg {
        struct list_head funcs;
        int func_num;
};

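/* One function ((sub)program): its basic blocks sorted by head instruction,
 * the first and last instruction it covers (end is inclusive), its index
 * within the CFG and its basic block count, dummy blocks included.
 */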
struct func_node {
        struct list_head l;
        struct list_head bbs;
        struct bpf_insn *start;
        struct bpf_insn *end;
        int idx;
        int bb_num;
};

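/* Basic block: predecessor (e_prevs) and successor (e_succs) edge lists,
 * plus the first (head) and last (tail) instruction of the block.
 */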
struct bb_node {
        struct list_head l;
        struct list_head e_prevs;
        struct list_head e_succs;
        struct bpf_insn *head;
        struct bpf_insn *tail;
        int idx;
};

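/* Directed edge between two basic blocks; flags says whether control falls
 * through to the destination or jumps to it.
 */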
#define EDGE_FLAG_EMPTY         0x0
#define EDGE_FLAG_FALLTHROUGH   0x1
#define EDGE_FLAG_JUMP          0x2
struct edge_node {
        struct list_head l;
        struct bb_node *src;
        struct bb_node *dst;
        int flags;
};

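/* Each function gets two dummy blocks, ENTRY (index 0) and EXIT (index 1);
 * the helpers below navigate the sorted function and basic block lists.
 */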
#define ENTRY_BLOCK_INDEX       0
#define EXIT_BLOCK_INDEX        1
#define NUM_FIXED_BLOCKS        2
#define func_prev(func)         list_prev_entry(func, l)
#define func_next(func)         list_next_entry(func, l)
#define bb_prev(bb)             list_prev_entry(bb, l)
#define bb_next(bb)             list_next_entry(bb, l)
#define entry_bb(func)          func_first_bb(func)
#define exit_bb(func)           func_last_bb(func)
#define cfg_first_func(cfg)     \
        list_first_entry(&cfg->funcs, struct func_node, l)
#define cfg_last_func(cfg)      \
        list_last_entry(&cfg->funcs, struct func_node, l)
#define func_first_bb(func)     \
        list_first_entry(&func->bbs, struct bb_node, l)
#define func_last_bb(func)      \
        list_last_entry(&func->bbs, struct bb_node, l)

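/* Get the function starting at insn, inserting a new node at the right
 * position in the CFG's sorted function list if none exists yet.
 */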
static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
{
        struct func_node *new_func, *func;

        list_for_each_entry(func, &cfg->funcs, l) {
                if (func->start == insn)
                        return func;
                else if (func->start > insn)
                        break;
        }

        func = func_prev(func);
        new_func = calloc(1, sizeof(*new_func));
        if (!new_func) {
                p_err("OOM when allocating FUNC node");
                return NULL;
        }
        new_func->start = insn;
        new_func->idx = cfg->func_num;
        list_add(&new_func->l, &func->l);
        cfg->func_num++;

        return new_func;
}

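/* Get the basic block starting at insn, inserting a new node at the right
 * position in the function's sorted block list if none exists yet.
 */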
static struct bb_node *func_append_bb(struct func_node *func,
                                      struct bpf_insn *insn)
{
        struct bb_node *new_bb, *bb;

        list_for_each_entry(bb, &func->bbs, l) {
                if (bb->head == insn)
                        return bb;
                else if (bb->head > insn)
                        break;
        }

        bb = bb_prev(bb);
        new_bb = calloc(1, sizeof(*new_bb));
        if (!new_bb) {
                p_err("OOM when allocating BB node");
                return NULL;
        }
        new_bb->head = insn;
        INIT_LIST_HEAD(&new_bb->e_prevs);
        INIT_LIST_HEAD(&new_bb->e_succs);
        list_add(&new_bb->l, &bb->l);

        return new_bb;
}

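/* Allocate an empty block (used for the ENTRY and EXIT placeholders) and
 * link it right after the given list position.
 */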
static struct bb_node *func_insert_dummy_bb(struct list_head *after)
{
        struct bb_node *bb;

        bb = calloc(1, sizeof(*bb));
        if (!bb) {
                p_err("OOM when allocating BB node");
                return NULL;
        }

        INIT_LIST_HEAD(&bb->e_prevs);
        INIT_LIST_HEAD(&bb->e_succs);
        list_add(&bb->l, after);

        return bb;
}

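/* Partition [cur, end) into functions: one starting at cur, plus one at the
 * target of every BPF-to-BPF pseudo call. Each function then ends right
 * before the next one starts. Returns true on failure.
 */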
static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
                                struct bpf_insn *end)
{
        struct func_node *func, *last_func;

        func = cfg_append_func(cfg, cur);
        if (!func)
                return true;

        for (; cur < end; cur++) {
                if (cur->code != (BPF_JMP | BPF_CALL))
                        continue;
                if (cur->src_reg != BPF_PSEUDO_CALL)
                        continue;
                func = cfg_append_func(cfg, cur + cur->off + 1);
                if (!func)
                        return true;
        }

        last_func = cfg_last_func(cfg);
        last_func->end = end - 1;
        func = cfg_first_func(cfg);
        list_for_each_entry_from(func, &last_func->l, l) {
                func->end = func_next(func)->start - 1;
        }

        return false;
}

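/* First partitioning pass: a block starts at the function's first
 * instruction, at every jump target, and, for conditional jumps, at the
 * fall-through instruction as well. BPF_EXIT and BPF_CALL do not start
 * new blocks. Returns true on failure.
 */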
static bool func_partition_bb_head(struct func_node *func)
{
        struct bpf_insn *cur, *end;
        struct bb_node *bb;

        cur = func->start;
        end = func->end;
        INIT_LIST_HEAD(&func->bbs);
        bb = func_append_bb(func, cur);
        if (!bb)
                return true;

        for (; cur <= end; cur++) {
                if (BPF_CLASS(cur->code) == BPF_JMP) {
                        u8 opcode = BPF_OP(cur->code);

                        if (opcode == BPF_EXIT || opcode == BPF_CALL)
                                continue;

                        bb = func_append_bb(func, cur + cur->off + 1);
                        if (!bb)
                                return true;

                        if (opcode != BPF_JA) {
                                bb = func_append_bb(func, cur + 1);
                                if (!bb)
                                        return true;
                        }
                }
        }

        return false;
}

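/* Second pass: with all heads known, each block ends right before the next
 * block's head (the last block ends at the function's last instruction).
 * Real blocks are numbered from NUM_FIXED_BLOCKS so that indices 0 and 1
 * stay reserved for the dummy ENTRY and EXIT blocks.
 */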
static void func_partition_bb_tail(struct func_node *func)
{
        unsigned int bb_idx = NUM_FIXED_BLOCKS;
        struct bb_node *bb, *last;

        last = func_last_bb(func);
        last->tail = func->end;
        bb = func_first_bb(func);
        list_for_each_entry_from(bb, &last->l, l) {
                bb->tail = bb_next(bb)->head - 1;
                bb->idx = bb_idx++;
        }

        last->idx = bb_idx++;
        func->bb_num = bb_idx;
}

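/* Insert the dummy ENTRY block before the first real block and the dummy
 * EXIT block after the last one.
 */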
static bool func_add_special_bb(struct func_node *func)
{
        struct bb_node *bb;

        bb = func_insert_dummy_bb(&func->bbs);
        if (!bb)
                return true;
        bb->idx = ENTRY_BLOCK_INDEX;

        bb = func_insert_dummy_bb(&func_last_bb(func)->l);
        if (!bb)
                return true;
        bb->idx = EXIT_BLOCK_INDEX;

        return false;
}

static bool func_partition_bb(struct func_node *func)
{
        if (func_partition_bb_head(func))
                return true;

        func_partition_bb_tail(func);

        return false;
}

static struct bb_node *func_search_bb_with_head(struct func_node *func,
                                                struct bpf_insn *insn)
{
        struct bb_node *bb;

        list_for_each_entry(bb, &func->bbs, l) {
                if (bb->head == insn)
                        return bb;
        }

        return NULL;
}

static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
                                  int flags)
{
        struct edge_node *e;

        e = calloc(1, sizeof(*e));
        if (!e) {
                p_err("OOM when allocating edge node");
                return NULL;
        }

        if (src)
                e->src = src;
        if (dst)
                e->dst = dst;

        e->flags |= flags;

        return e;
}

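/* Wire up the edges: ENTRY falls through into the first real block and the
 * last real block falls through into EXIT; every other block gets a
 * fall-through edge, a jump edge, or both, depending on its last
 * instruction. Returns true on failure.
 */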
static bool func_add_bb_edges(struct func_node *func)
{
        struct bpf_insn *insn;
        struct edge_node *e;
        struct bb_node *bb;

        bb = entry_bb(func);
        e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
        if (!e)
                return true;
        list_add_tail(&e->l, &bb->e_succs);

        bb = exit_bb(func);
        e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
        if (!e)
                return true;
        list_add_tail(&e->l, &bb->e_prevs);

        bb = entry_bb(func);
        bb = bb_next(bb);
        list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
                e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
                if (!e)
                        return true;
                e->src = bb;

                insn = bb->tail;
                if (BPF_CLASS(insn->code) != BPF_JMP ||
                    BPF_OP(insn->code) == BPF_EXIT) {
                        e->dst = bb_next(bb);
                        e->flags |= EDGE_FLAG_FALLTHROUGH;
                        list_add_tail(&e->l, &bb->e_succs);
                        continue;
                } else if (BPF_OP(insn->code) == BPF_JA) {
                        e->dst = func_search_bb_with_head(func,
                                                          insn + insn->off + 1);
                        e->flags |= EDGE_FLAG_JUMP;
                        list_add_tail(&e->l, &bb->e_succs);
                        continue;
                }

                e->dst = bb_next(bb);
                e->flags |= EDGE_FLAG_FALLTHROUGH;
                list_add_tail(&e->l, &bb->e_succs);

                e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
                if (!e)
                        return true;
                e->src = bb;
                e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
                list_add_tail(&e->l, &bb->e_succs);
        }

        return false;
}

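/* Build the CFG for len bytes of instructions: split into functions, split
 * each function into basic blocks, add the dummy ENTRY/EXIT blocks, then
 * connect everything with edges. Returns true on failure.
 */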
static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
{
        int cnt = len / sizeof(*insn);
        struct func_node *func;

        INIT_LIST_HEAD(&cfg->funcs);

        if (cfg_partition_funcs(cfg, insn, insn + cnt))
                return true;

        list_for_each_entry(func, &cfg->funcs, l) {
                if (func_partition_bb(func) || func_add_special_bb(func))
                        return true;

                if (func_add_bb_edges(func))
                        return true;
        }

        return false;
}

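/* Free every edge, basic block and function in the CFG. */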
static void cfg_destroy(struct cfg *cfg)
{
        struct func_node *func, *func2;

        list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
                struct bb_node *bb, *bb2;

                list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
                        struct edge_node *e, *e2;

                        list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
                                list_del(&e->l);
                                free(e);
                        }

                        list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
                                list_del(&e->l);
                                free(e);
                        }

                        list_del(&bb->l);
                        free(bb);
                }

                list_del(&func->l);
                free(func);
        }
}

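/* Print one DOT node per block: ENTRY and EXIT as "Mdiamond" shapes, real
 * blocks as records labelled with their disassembled instructions.
 */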
static void draw_bb_node(struct func_node *func, struct bb_node *bb)
{
        const char *shape;

        if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
                shape = "Mdiamond";
        else
                shape = "record";

        printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
               func->idx, bb->idx, shape);

        if (bb->idx == ENTRY_BLOCK_INDEX) {
                printf("ENTRY");
        } else if (bb->idx == EXIT_BLOCK_INDEX) {
                printf("EXIT");
        } else {
                unsigned int start_idx;
                struct dump_data dd = {};

                printf("{");
                kernel_syms_load(&dd);
                start_idx = bb->head - func->start;
                dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
                kernel_syms_destroy(&dd);
                printf("}");
        }

        printf("\"];\n\n");
}

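/* Print a DOT edge for each successor of the given basic block. */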
static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
{
        const char *style = "\"solid,bold\"";
        const char *color = "black";
        int func_idx = func->idx;
        struct edge_node *e;
        int weight = 10;

        if (list_empty(&bb->e_succs))
                return;

        list_for_each_entry(e, &bb->e_succs, l) {
                printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
                       func_idx, e->src->idx, func_idx, e->dst->idx,
                       style, color, weight);
                printf("];\n");
        }
}

static void func_output_bb_def(struct func_node *func)
{
        struct bb_node *bb;

        list_for_each_entry(bb, &func->bbs, l) {
                draw_bb_node(func, bb);
        }
}

static void func_output_edges(struct func_node *func)
{
        int func_idx = func->idx;
        struct bb_node *bb;

        list_for_each_entry(bb, &func->bbs, l) {
                draw_bb_succ_edges(func, bb);
        }

        /* Add an invisible edge from ENTRY to EXIT; this improves
         * the graph layout.
         */
        printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
               func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
}

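/* Print the CFG as a DOT digraph, one dashed cluster subgraph per
 * function, e.g. (illustrative sketch only):
 *
 *     digraph "DOT graph for eBPF program" {
 *     subgraph "cluster_0" { ... node and edge statements ... }
 *     }
 */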
static void cfg_dump(struct cfg *cfg)
{
        struct func_node *func;

        printf("digraph \"DOT graph for eBPF program\" {\n");
        list_for_each_entry(func, &cfg->funcs, l) {
                printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
                       func->idx, func->idx);
                func_output_bb_def(func);
                func_output_edges(func);
                printf("}\n");
        }
        printf("}\n");
}

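/* Entry point: build the CFG for a buffer of xlated instructions, print it
 * as DOT, then free it. On a build failure the partially built CFG is not
 * freed; bpftool exits shortly afterwards anyway. For reference (not part
 * of this file), the output is typically rendered with Graphviz, e.g.:
 *
 *     bpftool prog dump xlated id 1 visual | dot -Tpng -o cfg.png
 */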
void dump_xlated_cfg(void *buf, unsigned int len)
{
        struct bpf_insn *insn = buf;
        struct cfg cfg;

        memset(&cfg, 0, sizeof(cfg));
        if (cfg_build(&cfg, insn, len))
                return;

        cfg_dump(&cfg);

        cfg_destroy(&cfg);
}

