TOMOYO Linux Cross Reference
Linux/tools/perf/bench/numa.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * numa.c
  4  *
  5  * numa: Simulate a NUMA-sensitive workload and measure its NUMA performance
  6  */
  7 
  8 #include <inttypes.h>
  9 /* For the CLR_() macros */
 10 #include <pthread.h>
 11 
 12 #include "../perf.h"
 13 #include "../builtin.h"
 14 #include "../util/util.h"
 15 #include <subcmd/parse-options.h>
 16 #include "../util/cloexec.h"
 17 
 18 #include "bench.h"
 19 
 20 #include <errno.h>
 21 #include <sched.h>
 22 #include <stdio.h>
 23 #include <assert.h>
 24 #include <malloc.h>
 25 #include <signal.h>
 26 #include <stdlib.h>
 27 #include <string.h>
 28 #include <unistd.h>
 29 #include <sys/mman.h>
 30 #include <sys/time.h>
 31 #include <sys/resource.h>
 32 #include <sys/wait.h>
 33 #include <sys/prctl.h>
 34 #include <sys/types.h>
 35 #include <linux/kernel.h>
 36 #include <linux/time64.h>
 37 #include <linux/numa.h>
 38 
 39 #include <numa.h>
 40 #include <numaif.h>
 41 
 42 /*
 43  * Regular printout to the terminal, suppressed if -q is specified:
 44  */
 45 #define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
 46 
 47 /*
 48  * Debug printf:
 49  */
 50 #undef dprintf
 51 #define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
 52 
 53 struct thread_data {
 54         int                     curr_cpu;
 55         cpu_set_t               bind_cpumask;
 56         int                     bind_node;
 57         u8                      *process_data;
 58         int                     process_nr;
 59         int                     thread_nr;
 60         int                     task_nr;
 61         unsigned int            loops_done;
 62         u64                     val;
 63         u64                     runtime_ns;
 64         u64                     system_time_ns;
 65         u64                     user_time_ns;
 66         double                  speed_gbs;
 67         pthread_mutex_t         *process_lock;
 68 };
 69 
 70 /* Parameters set by options: */
 71 
 72 struct params {
 73         /* Startup synchronization: */
 74         bool                    serialize_startup;
 75 
 76         /* Task hierarchy: */
 77         int                     nr_proc;
 78         int                     nr_threads;
 79 
 80         /* Working set sizes: */
 81         const char              *mb_global_str;
 82         const char              *mb_proc_str;
 83         const char              *mb_proc_locked_str;
 84         const char              *mb_thread_str;
 85 
 86         double                  mb_global;
 87         double                  mb_proc;
 88         double                  mb_proc_locked;
 89         double                  mb_thread;
 90 
 91         /* Access patterns to the working set: */
 92         bool                    data_reads;
 93         bool                    data_writes;
 94         bool                    data_backwards;
 95         bool                    data_zero_memset;
 96         bool                    data_rand_walk;
 97         u32                     nr_loops;
 98         u32                     nr_secs;
 99         u32                     sleep_usecs;
100 
101         /* Working set initialization: */
102         bool                    init_zero;
103         bool                    init_random;
104         bool                    init_cpu0;
105 
106         /* Misc options: */
107         int                     show_details;
108         int                     run_all;
109         int                     thp;
110 
111         long                    bytes_global;
112         long                    bytes_process;
113         long                    bytes_process_locked;
114         long                    bytes_thread;
115 
116         int                     nr_tasks;
117         bool                    show_quiet;
118 
119         bool                    show_convergence;
120         bool                    measure_convergence;
121 
122         int                     perturb_secs;
123         int                     nr_cpus;
124         int                     nr_nodes;
125 
126         /* Affinity options -C and -N: */
127         char                    *cpu_list_str;
128         char                    *node_list_str;
129 };
130 
131 
132 /* Global, read-writable area, accessible to all processes and threads: */
133 
134 struct global_info {
135         u8                      *data;
136 
137         pthread_mutex_t         startup_mutex;
138         int                     nr_tasks_started;
139 
140         pthread_mutex_t         startup_done_mutex;
141 
142         pthread_mutex_t         start_work_mutex;
143         int                     nr_tasks_working;
144 
145         pthread_mutex_t         stop_work_mutex;
146         u64                     bytes_done;
147 
148         struct thread_data      *threads;
149 
150         /* Convergence latency measurement: */
151         bool                    all_converged;
152         bool                    stop_work;
153 
154         int                     print_once;
155 
156         struct params           p;
157 };
158 
159 static struct global_info       *g = NULL;
160 
161 static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
162 static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);
163 
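     /* Option values as parsed from the command line; copied into g->p by init(): */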
164 struct params p0;
165 
166 static const struct option options[] = {
167         OPT_INTEGER('p', "nr_proc"      , &p0.nr_proc,          "number of processes"),
168         OPT_INTEGER('t', "nr_threads"   , &p0.nr_threads,       "number of threads per process"),
169 
170         OPT_STRING('G', "mb_global"     , &p0.mb_global_str,    "MB", "global  memory (MBs)"),
171         OPT_STRING('P', "mb_proc"       , &p0.mb_proc_str,      "MB", "process memory (MBs)"),
172         OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
173         OPT_STRING('T', "mb_thread"     , &p0.mb_thread_str,    "MB", "thread  memory (MBs)"),
174 
175         OPT_UINTEGER('l', "nr_loops"    , &p0.nr_loops,         "max number of loops to run (default: unlimited)"),
176         OPT_UINTEGER('s', "nr_secs"     , &p0.nr_secs,          "max number of seconds to run (default: 5 secs)"),
177         OPT_UINTEGER('u', "usleep"      , &p0.sleep_usecs,      "usecs to sleep per loop iteration"),
178 
179         OPT_BOOLEAN('R', "data_reads"   , &p0.data_reads,       "access the data via reads (can be mixed with -W)"),
180         OPT_BOOLEAN('W', "data_writes"  , &p0.data_writes,      "access the data via writes (can be mixed with -R)"),
181         OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards,  "access the data backwards as well"),
182         OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
183         OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk,  "access the data with random (32bit LFSR) walk"),
184 
185 
186         OPT_BOOLEAN('z', "init_zero"    , &p0.init_zero,        "bzero the initial allocations"),
187         OPT_BOOLEAN('I', "init_random"  , &p0.init_random,      "randomize the contents of the initial allocations"),
 188         OPT_BOOLEAN('0', "init_cpu0"    , &p0.init_cpu0,        "do the initial allocations on CPU#0"),
189         OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs,      "perturb thread 0/0 every X secs, to test convergence stability"),
190 
191         OPT_INCR   ('d', "show_details" , &p0.show_details,     "Show details"),
192         OPT_INCR   ('a', "all"          , &p0.run_all,          "Run all tests in the suite"),
193         OPT_INTEGER('H', "thp"          , &p0.thp,              "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
194         OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details, "
195                     "convergence is reached when each process (all its threads) is running on a single NUMA node."),
196         OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
197         OPT_BOOLEAN('q', "quiet"        , &p0.show_quiet,       "quiet mode"),
198         OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
199 
200         /* Special option string parsing callbacks: */
201         OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]",
202                         "bind the first N tasks to these specific cpus (the rest is unbound)",
203                         parse_cpus_opt),
204         OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]",
205                         "bind the first N tasks to these specific memory nodes (the rest is unbound)",
206                         parse_nodes_opt),
207         OPT_END()
208 };
209 
210 static const char * const bench_numa_usage[] = {
211         "perf bench numa <options>",
212         NULL
213 };
214 
215 static const char * const numa_usage[] = {
216         "perf bench numa mem [<options>]",
217         NULL
218 };
219 
220 /*
 221  * Return the number of NUMA nodes present.
222  */
223 static int nr_numa_nodes(void)
224 {
225         int i, nr_nodes = 0;
226 
227         for (i = 0; i < g->p.nr_nodes; i++) {
228                 if (numa_bitmask_isbitset(numa_nodes_ptr, i))
229                         nr_nodes++;
230         }
231 
232         return nr_nodes;
233 }
234 
235 /*
 236  * Check whether the given NUMA node is present.
237  */
238 static int is_node_present(int node)
239 {
240         return numa_bitmask_isbitset(numa_nodes_ptr, node);
241 }
242 
243 /*
 244  * Check whether the given NUMA node has any CPUs.
245  */
 246 static bool node_has_cpus(int node)
 247 {
 248         struct bitmask *cpu = numa_allocate_cpumask();
 249         bool ret = false; /* let's fall back to nocpus safely */
 250         unsigned int i;
 251 
 252         if (cpu && !numa_node_to_cpus(node, cpu)) {
 253                 for (i = 0; i < cpu->size; i++)
 254                         ret |= numa_bitmask_isbitset(cpu, i);
 255         }
 256         numa_free_cpumask(cpu); /* don't leak the cpumask */
 257 
 258         return ret;
 259 }
260 
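     /*
      * Bind the calling task to @target_cpu (or to all CPUs when -1 is passed)
      * and return the previous affinity mask so the caller can restore it:
      */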
261 static cpu_set_t bind_to_cpu(int target_cpu)
262 {
263         cpu_set_t orig_mask, mask;
264         int ret;
265 
266         ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
267         BUG_ON(ret);
268 
269         CPU_ZERO(&mask);
270 
271         if (target_cpu == -1) {
272                 int cpu;
273 
274                 for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
275                         CPU_SET(cpu, &mask);
276         } else {
277                 BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
278                 CPU_SET(target_cpu, &mask);
279         }
280 
281         ret = sched_setaffinity(0, sizeof(mask), &mask);
282         BUG_ON(ret);
283 
284         return orig_mask;
285 }
286 
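     /*
      * Bind the calling task to all CPUs of @target_node (or to every CPU for
      * NUMA_NO_NODE). The node => CPU-range mapping below assumes that CPUs are
      * numbered consecutively per node and spread evenly across nodes, which
      * the BUG_ON()s check:
      */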
287 static cpu_set_t bind_to_node(int target_node)
288 {
289         int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
290         cpu_set_t orig_mask, mask;
291         int cpu;
292         int ret;
293 
294         BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
295         BUG_ON(!cpus_per_node);
296 
297         ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
298         BUG_ON(ret);
299 
300         CPU_ZERO(&mask);
301 
302         if (target_node == NUMA_NO_NODE) {
303                 for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
304                         CPU_SET(cpu, &mask);
305         } else {
306                 int cpu_start = (target_node + 0) * cpus_per_node;
307                 int cpu_stop  = (target_node + 1) * cpus_per_node;
308 
309                 BUG_ON(cpu_stop > g->p.nr_cpus);
310 
311                 for (cpu = cpu_start; cpu < cpu_stop; cpu++)
312                         CPU_SET(cpu, &mask);
313         }
314 
315         ret = sched_setaffinity(0, sizeof(mask), &mask);
316         BUG_ON(ret);
317 
318         return orig_mask;
319 }
320 
321 static void bind_to_cpumask(cpu_set_t mask)
322 {
323         int ret;
324 
325         ret = sched_setaffinity(0, sizeof(mask), &mask);
326         BUG_ON(ret);
327 }
328 
329 static void mempol_restore(void)
330 {
331         int ret;
332 
333         ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);
334 
335         BUG_ON(ret);
336 }
337 
338 static void bind_to_memnode(int node)
339 {
340         unsigned long nodemask;
341         int ret;
342 
343         if (node == NUMA_NO_NODE)
344                 return;
345 
346         BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
347         nodemask = 1L << node;
348 
349         ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8);
350         dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret);
351 
352         BUG_ON(ret);
353 }
354 
355 #define HPSIZE (2*1024*1024)
356 
357 #define set_taskname(fmt...)                            \
358 do {                                                    \
359         char name[20];                                  \
360                                                         \
361         snprintf(name, 20, fmt);                        \
362         prctl(PR_SET_NAME, name);                       \
363 } while (0)
364 
365 static u8 *alloc_data(ssize_t bytes0, int map_flags,
366                       int init_zero, int init_cpu0, int thp, int init_random)
367 {
368         cpu_set_t orig_mask;
369         ssize_t bytes;
370         u8 *buf;
371         int ret;
372 
373         if (!bytes0)
374                 return NULL;
375 
376         /* Allocate and initialize all memory on CPU#0: */
377         if (init_cpu0) {
378                 orig_mask = bind_to_node(0);
379                 bind_to_memnode(0);
380         }
381 
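             /* Over-allocate by one huge page so the buffer can be aligned to a 2MB boundary below: */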
382         bytes = bytes0 + HPSIZE;
383 
384         buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0);
385         BUG_ON(buf == (void *)-1);
386 
387         if (map_flags == MAP_PRIVATE) {
388                 if (thp > 0) {
389                         ret = madvise(buf, bytes, MADV_HUGEPAGE);
390                         if (ret && !g->print_once) {
391                                 g->print_once = 1;
392                                 printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n");
393                         }
394                 }
395                 if (thp < 0) {
396                         ret = madvise(buf, bytes, MADV_NOHUGEPAGE);
397                         if (ret && !g->print_once) {
398                                 g->print_once = 1;
399                                 printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n");
400                         }
401                 }
402         }
403 
404         if (init_zero) {
405                 bzero(buf, bytes);
406         } else {
407                 /* Initialize random contents, different in each word: */
408                 if (init_random) {
409                         u64 *wbuf = (void *)buf;
410                         long off = rand();
411                         long i;
412 
413                         for (i = 0; i < bytes/8; i++)
414                                 wbuf[i] = i + off;
415                 }
416         }
417 
418         /* Align to 2MB boundary: */
419         buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1));
420 
421         /* Restore affinity: */
422         if (init_cpu0) {
423                 bind_to_cpumask(orig_mask);
424                 mempol_restore();
425         }
426 
427         return buf;
428 }
429 
430 static void free_data(void *data, ssize_t bytes)
431 {
432         int ret;
433 
434         if (!data)
435                 return;
436 
437         ret = munmap(data, bytes);
438         BUG_ON(ret);
439 }
440 
441 /*
442  * Create a shared memory buffer that can be shared between processes, zeroed:
443  */
444 static void * zalloc_shared_data(ssize_t bytes)
445 {
446         return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0,  g->p.thp, g->p.init_random);
447 }
448 
449 /*
450  * Create a shared memory buffer that can be shared between processes:
451  */
452 static void * setup_shared_data(ssize_t bytes)
453 {
454         return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
455 }
456 
457 /*
458  * Allocate process-local memory - this will either be shared between
459  * threads of this process, or only be accessed by this thread:
460  */
461 static void * setup_private_data(ssize_t bytes)
462 {
463         return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
464 }
465 
466 /*
 467  * Initialize a process-shared (global) mutex:
468  */
469 static void init_global_mutex(pthread_mutex_t *mutex)
470 {
471         pthread_mutexattr_t attr;
472 
473         pthread_mutexattr_init(&attr);
474         pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
475         pthread_mutex_init(mutex, &attr);
476 }
477 
478 static int parse_cpu_list(const char *arg)
479 {
480         p0.cpu_list_str = strdup(arg);
481 
482         dprintf("got CPU list: {%s}\n", p0.cpu_list_str);
483 
484         return 0;
485 }
486 
487 static int parse_setup_cpu_list(void)
488 {
489         struct thread_data *td;
490         char *str0, *str;
491         int t;
492 
493         if (!g->p.cpu_list_str)
494                 return 0;
495 
496         dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
497 
498         str0 = str = strdup(g->p.cpu_list_str);
499         t = 0;
500 
501         BUG_ON(!str);
502 
503         tprintf("# binding tasks to CPUs:\n");
504         tprintf("#  ");
505 
506         while (true) {
507                 int bind_cpu, bind_cpu_0, bind_cpu_1;
508                 char *tok, *tok_end, *tok_step, *tok_len, *tok_mul;
509                 int bind_len;
510                 int step;
511                 int mul;
512 
513                 tok = strsep(&str, ",");
514                 if (!tok)
515                         break;
516 
517                 tok_end = strstr(tok, "-");
518 
519                 dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
520                 if (!tok_end) {
521                         /* Single CPU specified: */
522                         bind_cpu_0 = bind_cpu_1 = atol(tok);
523                 } else {
524                         /* CPU range specified (for example: "5-11"): */
525                         bind_cpu_0 = atol(tok);
526                         bind_cpu_1 = atol(tok_end + 1);
527                 }
528 
529                 step = 1;
530                 tok_step = strstr(tok, "#");
531                 if (tok_step) {
532                         step = atol(tok_step + 1);
533                         BUG_ON(step <= 0 || step >= g->p.nr_cpus);
534                 }
535 
536                 /*
537                  * Mask length.
538                  * Eg: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4',
539                  * where the _4 means the next 4 CPUs are allowed.
540                  */
541                 bind_len = 1;
542                 tok_len = strstr(tok, "_");
543                 if (tok_len) {
544                         bind_len = atol(tok_len + 1);
545                         BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
546                 }
547 
 548                 /* Multiplier shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
549                 mul = 1;
550                 tok_mul = strstr(tok, "x");
551                 if (tok_mul) {
552                         mul = atol(tok_mul + 1);
553                         BUG_ON(mul <= 0);
554                 }
555 
556                 dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);
557 
558                 if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) {
559                         printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus);
560                         return -1;
561                 }
562 
563                 BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
564                 BUG_ON(bind_cpu_0 > bind_cpu_1);
565 
566                 for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
567                         int i;
568 
569                         for (i = 0; i < mul; i++) {
570                                 int cpu;
571 
572                                 if (t >= g->p.nr_tasks) {
573                                         printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu);
574                                         goto out;
575                                 }
576                                 td = g->threads + t;
577 
578                                 if (t)
579                                         tprintf(",");
580                                 if (bind_len > 1) {
581                                         tprintf("%2d/%d", bind_cpu, bind_len);
582                                 } else {
583                                         tprintf("%2d", bind_cpu);
584                                 }
585 
586                                 CPU_ZERO(&td->bind_cpumask);
587                                 for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
588                                         BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
589                                         CPU_SET(cpu, &td->bind_cpumask);
590                                 }
591                                 t++;
592                         }
593                 }
594         }
595 out:
596 
597         tprintf("\n");
598 
599         if (t < g->p.nr_tasks)
600                 printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
601 
602         free(str0);
603         return 0;
604 }
605 
606 static int parse_cpus_opt(const struct option *opt __maybe_unused,
607                           const char *arg, int unset __maybe_unused)
608 {
609         if (!arg)
610                 return -1;
611 
612         return parse_cpu_list(arg);
613 }
614 
615 static int parse_node_list(const char *arg)
616 {
617         p0.node_list_str = strdup(arg);
618 
619         dprintf("got NODE list: {%s}\n", p0.node_list_str);
620 
621         return 0;
622 }
623 
624 static int parse_setup_node_list(void)
625 {
626         struct thread_data *td;
627         char *str0, *str;
628         int t;
629 
630         if (!g->p.node_list_str)
631                 return 0;
632 
633         dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
634 
635         str0 = str = strdup(g->p.node_list_str);
636         t = 0;
637 
638         BUG_ON(!str);
639 
640         tprintf("# binding tasks to NODEs:\n");
641         tprintf("# ");
642 
643         while (true) {
644                 int bind_node, bind_node_0, bind_node_1;
645                 char *tok, *tok_end, *tok_step, *tok_mul;
646                 int step;
647                 int mul;
648 
649                 tok = strsep(&str, ",");
650                 if (!tok)
651                         break;
652 
653                 tok_end = strstr(tok, "-");
654 
655                 dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
656                 if (!tok_end) {
657                         /* Single NODE specified: */
658                         bind_node_0 = bind_node_1 = atol(tok);
659                 } else {
660                         /* NODE range specified (for example: "5-11"): */
661                         bind_node_0 = atol(tok);
662                         bind_node_1 = atol(tok_end + 1);
663                 }
664 
665                 step = 1;
666                 tok_step = strstr(tok, "#");
667                 if (tok_step) {
668                         step = atol(tok_step + 1);
669                         BUG_ON(step <= 0 || step >= g->p.nr_nodes);
670                 }
671 
 672                 /* Multiplier shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
673                 mul = 1;
674                 tok_mul = strstr(tok, "x");
675                 if (tok_mul) {
676                         mul = atol(tok_mul + 1);
677                         BUG_ON(mul <= 0);
678                 }
679 
680                 dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);
681 
682                 if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) {
683                         printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes);
684                         return -1;
685                 }
686 
687                 BUG_ON(bind_node_0 < 0 || bind_node_1 < 0);
688                 BUG_ON(bind_node_0 > bind_node_1);
689 
690                 for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
691                         int i;
692 
693                         for (i = 0; i < mul; i++) {
694                                 if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
695                                         printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
696                                         goto out;
697                                 }
698                                 td = g->threads + t;
699 
700                                 if (!t)
701                                         tprintf(" %2d", bind_node);
702                                 else
703                                         tprintf(",%2d", bind_node);
704 
705                                 td->bind_node = bind_node;
706                                 t++;
707                         }
708                 }
709         }
710 out:
711 
712         tprintf("\n");
713 
714         if (t < g->p.nr_tasks)
715                 printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
716 
717         free(str0);
718         return 0;
719 }
720 
721 static int parse_nodes_opt(const struct option *opt __maybe_unused,
722                           const char *arg, int unset __maybe_unused)
723 {
724         if (!arg)
725                 return -1;
726 
727         return parse_node_list(arg);
730 }
731 
732 #define BIT(x) (1ul << x)
733 
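     /*
      * Advance a 32-bit Galois LFSR by one step: shift right and XOR in the tap
      * mask whenever the bit shifted out was set. do_work() uses this to
      * generate a cheap pseudo-random walk over the working set:
      */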
734 static inline uint32_t lfsr_32(uint32_t lfsr)
735 {
736         const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
737         return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
738 }
739 
740 /*
741  * Make sure there's real data dependency to RAM (when read
742  * accesses are enabled), so the compiler, the CPU and the
743  * kernel (KSM, zero page, etc.) cannot optimize away RAM
744  * accesses:
745  */
746 static inline u64 access_data(u64 *data, u64 val)
747 {
748         if (g->p.data_reads)
749                 val += *data;
750         if (g->p.data_writes)
751                 *data = val + 1;
752         return val;
753 }
754 
755 /*
756  * The worker process does two types of work, a forwards going
757  * loop and a backwards going loop.
758  *
759  * We do this so that on multiprocessor systems we do not create
760  * a 'train' of processing, with highly synchronized processes,
761  * skewing the whole benchmark.
762  */
763 static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val)
764 {
765         long words = bytes/sizeof(u64);
766         u64 *data = (void *)__data;
767         long chunk_0, chunk_1;
768         u64 *d0, *d, *d1;
769         long off;
770         long i;
771 
772         BUG_ON(!data && words);
773         BUG_ON(data && !words);
774 
775         if (!data)
776                 return val;
777 
778         /* Very simple memset() work variant: */
779         if (g->p.data_zero_memset && !g->p.data_rand_walk) {
780                 bzero(data, bytes);
781                 return val;
782         }
783 
784         /* Spread out by PID/TID nr and by loop nr: */
785         chunk_0 = words/nr_max;
786         chunk_1 = words/g->p.nr_loops;
787         off = nr*chunk_0 + loop*chunk_1;
788 
789         while (off >= words)
790                 off -= words;
791 
792         if (g->p.data_rand_walk) {
793                 u32 lfsr = nr + loop + val;
794                 int j;
795 
796                 for (i = 0; i < words/1024; i++) {
797                         long start, end;
798 
799                         lfsr = lfsr_32(lfsr);
800 
801                         start = lfsr % words;
802                         end = min(start + 1024, words-1);
803 
804                         if (g->p.data_zero_memset) {
805                                 bzero(data + start, (end-start) * sizeof(u64));
806                         } else {
807                                 for (j = start; j < end; j++)
808                                         val = access_data(data + j, val);
809                         }
810                 }
811         } else if (!g->p.data_backwards || (nr + loop) & 1) {
812 
813                 d0 = data + off;
814                 d  = data + off + 1;
815                 d1 = data + words;
816 
817                 /* Process data forwards: */
818                 for (;;) {
819                         if (unlikely(d >= d1))
820                                 d = data;
821                         if (unlikely(d == d0))
822                                 break;
823 
824                         val = access_data(d, val);
825 
826                         d++;
827                 }
828         } else {
829                 /* Process data backwards: */
830 
831                 d0 = data + off;
832                 d  = data + off - 1;
833                 d1 = data + words;
834 
 835                 /* Process data backwards: */
836                 for (;;) {
837                         if (unlikely(d < data))
838                                 d = data + words-1;
839                         if (unlikely(d == d0))
840                                 break;
841 
842                         val = access_data(d, val);
843 
844                         d--;
845                 }
846         }
847 
848         return val;
849 }
850 
851 static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
852 {
853         unsigned int cpu;
854 
855         cpu = sched_getcpu();
856 
857         g->threads[task_nr].curr_cpu = cpu;
858         prctl(0, bytes_worked);
859 }
860 
861 #define MAX_NR_NODES    64
862 
863 /*
864  * Count the number of nodes a process's threads
865  * are spread out on.
866  *
867  * A count of 1 means that the process is compressed
868  * to a single node. A count of g->p.nr_nodes means it's
869  * spread out on the whole system.
870  */
871 static int count_process_nodes(int process_nr)
872 {
873         char node_present[MAX_NR_NODES] = { 0, };
874         int nodes;
875         int n, t;
876 
877         for (t = 0; t < g->p.nr_threads; t++) {
878                 struct thread_data *td;
879                 int task_nr;
880                 int node;
881 
882                 task_nr = process_nr*g->p.nr_threads + t;
883                 td = g->threads + task_nr;
884 
885                 node = numa_node_of_cpu(td->curr_cpu);
886                 if (node < 0) /* curr_cpu was likely still -1 */
887                         return 0;
888 
889                 node_present[node] = 1;
890         }
891 
892         nodes = 0;
893 
894         for (n = 0; n < MAX_NR_NODES; n++)
895                 nodes += node_present[n];
896 
897         return nodes;
898 }
899 
900 /*
901  * Count the number of distinct process-threads a node contains.
902  *
903  * A count of 1 means that the node contains only a single
904  * process. If all nodes on the system contain at most one
905  * process then we are well-converged.
906  */
907 static int count_node_processes(int node)
908 {
909         int processes = 0;
910         int t, p;
911 
912         for (p = 0; p < g->p.nr_proc; p++) {
913                 for (t = 0; t < g->p.nr_threads; t++) {
914                         struct thread_data *td;
915                         int task_nr;
916                         int n;
917 
918                         task_nr = p*g->p.nr_threads + t;
919                         td = g->threads + task_nr;
920 
921                         n = numa_node_of_cpu(td->curr_cpu);
922                         if (n == node) {
923                                 processes++;
924                                 break;
925                         }
926                 }
927         }
928 
929         return processes;
930 }
931 
932 static void calc_convergence_compression(int *strong)
933 {
934         unsigned int nodes_min, nodes_max;
935         int p;
936 
937         nodes_min = -1;
938         nodes_max =  0;
939 
940         for (p = 0; p < g->p.nr_proc; p++) {
941                 unsigned int nodes = count_process_nodes(p);
942 
943                 if (!nodes) {
944                         *strong = 0;
945                         return;
946                 }
947 
948                 nodes_min = min(nodes, nodes_min);
949                 nodes_max = max(nodes, nodes_max);
950         }
951 
952         /* Strong convergence: all threads compress on a single node: */
953         if (nodes_min == 1 && nodes_max == 1) {
954                 *strong = 1;
955         } else {
956                 *strong = 0;
957                 tprintf(" {%d-%d}", nodes_min, nodes_max);
958         }
959 }
960 
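     /*
      * Sample the node each task is currently running on and print per-node
      * task/process counts. Once every process's threads have converged onto a
      * single node the convergence time is recorded; with -m the workload is
      * then stopped:
      */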
961 static void calc_convergence(double runtime_ns_max, double *convergence)
962 {
963         unsigned int loops_done_min, loops_done_max;
964         int process_groups;
965         int nodes[MAX_NR_NODES];
966         int distance;
967         int nr_min;
968         int nr_max;
969         int strong;
970         int sum;
971         int nr;
972         int node;
973         int cpu;
974         int t;
975 
976         if (!g->p.show_convergence && !g->p.measure_convergence)
977                 return;
978 
979         for (node = 0; node < g->p.nr_nodes; node++)
980                 nodes[node] = 0;
981 
982         loops_done_min = -1;
983         loops_done_max = 0;
984 
985         for (t = 0; t < g->p.nr_tasks; t++) {
986                 struct thread_data *td = g->threads + t;
987                 unsigned int loops_done;
988 
989                 cpu = td->curr_cpu;
990 
991                 /* Not all threads have written it yet: */
992                 if (cpu < 0)
993                         continue;
994 
995                 node = numa_node_of_cpu(cpu);
996 
997                 nodes[node]++;
998 
999                 loops_done = td->loops_done;
1000                 loops_done_min = min(loops_done, loops_done_min);
1001                 loops_done_max = max(loops_done, loops_done_max);
1002         }
1003 
1004         nr_max = 0;
1005         nr_min = g->p.nr_tasks;
1006         sum = 0;
1007 
1008         for (node = 0; node < g->p.nr_nodes; node++) {
1009                 if (!is_node_present(node))
1010                         continue;
1011                 nr = nodes[node];
1012                 nr_min = min(nr, nr_min);
1013                 nr_max = max(nr, nr_max);
1014                 sum += nr;
1015         }
1016         BUG_ON(nr_min > nr_max);
1017 
1018         BUG_ON(sum > g->p.nr_tasks);
1019 
1020         if (0 && (sum < g->p.nr_tasks))
1021                 return;
1022 
1023         /*
1024          * Count the number of distinct process groups present
1025          * on nodes - when we are converged this will decrease
1026          * to g->p.nr_proc:
1027          */
1028         process_groups = 0;
1029 
1030         for (node = 0; node < g->p.nr_nodes; node++) {
1031                 int processes;
1032 
1033                 if (!is_node_present(node))
1034                         continue;
1035                 processes = count_node_processes(node);
1036                 nr = nodes[node];
1037                 tprintf(" %2d/%-2d", nr, processes);
1038 
1039                 process_groups += processes;
1040         }
1041 
1042         distance = nr_max - nr_min;
1043 
1044         tprintf(" [%2d/%-2d]", distance, process_groups);
1045 
1046         tprintf(" l:%3d-%-3d (%3d)",
1047                 loops_done_min, loops_done_max, loops_done_max-loops_done_min);
1048 
1049         if (loops_done_min && loops_done_max) {
1050                 double skew = 1.0 - (double)loops_done_min/loops_done_max;
1051 
1052                 tprintf(" [%4.1f%%]", skew * 100.0);
1053         }
1054 
1055         calc_convergence_compression(&strong);
1056 
1057         if (strong && process_groups == g->p.nr_proc) {
1058                 if (!*convergence) {
1059                         *convergence = runtime_ns_max;
1060                         tprintf(" (%6.1fs converged)\n", *convergence / NSEC_PER_SEC);
1061                         if (g->p.measure_convergence) {
1062                                 g->all_converged = true;
1063                                 g->stop_work = true;
1064                         }
1065                 }
1066         } else {
1067                 if (*convergence) {
1068                         tprintf(" (%6.1fs de-converged)", runtime_ns_max / NSEC_PER_SEC);
1069                         *convergence = 0;
1070                 }
1071                 tprintf("\n");
1072         }
1073 }
1074 
1075 static void show_summary(double runtime_ns_max, int l, double *convergence)
1076 {
1077         tprintf("\r #  %5.1f%%  [%.1f mins]",
1078                 (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0);
1079 
1080         calc_convergence(runtime_ns_max, convergence);
1081 
1082         if (g->p.show_details >= 0)
1083                 fflush(stdout);
1084 }
1085 
1086 static void *worker_thread(void *__tdata)
1087 {
1088         struct thread_data *td = __tdata;
1089         struct timeval start0, start, stop, diff;
1090         int process_nr = td->process_nr;
1091         int thread_nr = td->thread_nr;
1092         unsigned long last_perturbance;
1093         int task_nr = td->task_nr;
1094         int details = g->p.show_details;
1095         int first_task, last_task;
1096         double convergence = 0;
1097         u64 val = td->val;
1098         double runtime_ns_max;
1099         u8 *global_data;
1100         u8 *process_data;
1101         u8 *thread_data;
1102         u64 bytes_done, secs;
1103         long work_done;
1104         u32 l;
1105         struct rusage rusage;
1106 
1107         bind_to_cpumask(td->bind_cpumask);
1108         bind_to_memnode(td->bind_node);
1109 
1110         set_taskname("thread %d/%d", process_nr, thread_nr);
1111 
1112         global_data = g->data;
1113         process_data = td->process_data;
1114         thread_data = setup_private_data(g->p.bytes_thread);
1115 
1116         bytes_done = 0;
1117 
1118         last_task = 0;
1119         if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
1120                 last_task = 1;
1121 
1122         first_task = 0;
1123         if (process_nr == 0 && thread_nr == 0)
1124                 first_task = 1;
1125 
1126         if (details >= 2) {
1127                 printf("#  thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n",
1128                         process_nr, thread_nr, global_data, process_data, thread_data);
1129         }
1130 
1131         if (g->p.serialize_startup) {
1132                 pthread_mutex_lock(&g->startup_mutex);
1133                 g->nr_tasks_started++;
1134                 pthread_mutex_unlock(&g->startup_mutex);
1135 
1136                 /* Here we will wait for the main process to start us all at once: */
1137                 pthread_mutex_lock(&g->start_work_mutex);
1138                 g->nr_tasks_working++;
1139 
 1140                 /* The last one wakes the main process: */
1141                 if (g->nr_tasks_working == g->p.nr_tasks)
1142                         pthread_mutex_unlock(&g->startup_done_mutex);
1143 
1144                 pthread_mutex_unlock(&g->start_work_mutex);
1145         }
1146 
1147         gettimeofday(&start0, NULL);
1148 
1149         start = stop = start0;
1150         last_perturbance = start.tv_sec;
1151 
1152         for (l = 0; l < g->p.nr_loops; l++) {
1153                 start = stop;
1154 
1155                 if (g->stop_work)
1156                         break;
1157 
1158                 val += do_work(global_data,  g->p.bytes_global,  process_nr, g->p.nr_proc,      l, val);
1159                 val += do_work(process_data, g->p.bytes_process, thread_nr,  g->p.nr_threads,   l, val);
1160                 val += do_work(thread_data,  g->p.bytes_thread,  0,          1,         l, val);
1161 
1162                 if (g->p.sleep_usecs) {
1163                         pthread_mutex_lock(td->process_lock);
1164                         usleep(g->p.sleep_usecs);
1165                         pthread_mutex_unlock(td->process_lock);
1166                 }
1167                 /*
1168                  * Amount of work to be done under a process-global lock:
1169                  */
1170                 if (g->p.bytes_process_locked) {
1171                         pthread_mutex_lock(td->process_lock);
1172                         val += do_work(process_data, g->p.bytes_process_locked, thread_nr,  g->p.nr_threads,    l, val);
1173                         pthread_mutex_unlock(td->process_lock);
1174                 }
1175 
1176                 work_done = g->p.bytes_global + g->p.bytes_process +
1177                             g->p.bytes_process_locked + g->p.bytes_thread;
1178 
1179                 update_curr_cpu(task_nr, work_done);
1180                 bytes_done += work_done;
1181 
1182                 if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
1183                         continue;
1184 
1185                 td->loops_done = l;
1186 
1187                 gettimeofday(&stop, NULL);
1188 
1189                 /* Check whether our max runtime timed out: */
1190                 if (g->p.nr_secs) {
1191                         timersub(&stop, &start0, &diff);
1192                         if ((u32)diff.tv_sec >= g->p.nr_secs) {
1193                                 g->stop_work = true;
1194                                 break;
1195                         }
1196                 }
1197 
1198                 /* Update the summary at most once per second: */
1199                 if (start.tv_sec == stop.tv_sec)
1200                         continue;
1201 
1202                 /*
1203                  * Perturb the first task's equilibrium every g->p.perturb_secs seconds,
1204                  * by migrating to CPU#0:
1205                  */
1206                 if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
1207                         cpu_set_t orig_mask;
1208                         int target_cpu;
1209                         int this_cpu;
1210 
1211                         last_perturbance = stop.tv_sec;
1212 
1213                         /*
1214                          * Depending on where we are running, move into
1215                          * the other half of the system, to create some
1216                          * real disturbance:
1217                          */
1218                         this_cpu = g->threads[task_nr].curr_cpu;
1219                         if (this_cpu < g->p.nr_cpus/2)
1220                                 target_cpu = g->p.nr_cpus-1;
1221                         else
1222                                 target_cpu = 0;
1223 
1224                         orig_mask = bind_to_cpu(target_cpu);
1225 
1226                         /* Here we are running on the target CPU already */
1227                         if (details >= 1)
 1228                                 printf(" (injecting perturbance, moved to CPU#%d)\n", target_cpu);
1229 
1230                         bind_to_cpumask(orig_mask);
1231                 }
1232 
1233                 if (details >= 3) {
1234                         timersub(&stop, &start, &diff);
1235                         runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
1236                         runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;
1237 
1238                         if (details >= 0) {
1239                                 printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016"PRIx64"]\n",
1240                                         process_nr, thread_nr, runtime_ns_max / bytes_done, val);
1241                         }
1242                         fflush(stdout);
1243                 }
1244                 if (!last_task)
1245                         continue;
1246 
1247                 timersub(&stop, &start0, &diff);
1248                 runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
1249                 runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;
1250 
1251                 show_summary(runtime_ns_max, l, &convergence);
1252         }
1253 
1254         gettimeofday(&stop, NULL);
1255         timersub(&stop, &start0, &diff);
1256         td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
1257         td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
1258         secs = td->runtime_ns / NSEC_PER_SEC;
1259         td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
1260 
1261         getrusage(RUSAGE_THREAD, &rusage);
1262         td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
1263         td->system_time_ns += rusage.ru_stime.tv_usec * NSEC_PER_USEC;
1264         td->user_time_ns = rusage.ru_utime.tv_sec * NSEC_PER_SEC;
1265         td->user_time_ns += rusage.ru_utime.tv_usec * NSEC_PER_USEC;
1266 
1267         free_data(thread_data, g->p.bytes_thread);
1268 
1269         pthread_mutex_lock(&g->stop_work_mutex);
1270         g->bytes_done += bytes_done;
1271         pthread_mutex_unlock(&g->stop_work_mutex);
1272 
1273         return NULL;
1274 }
1275 
1276 /*
1277  * A worker process starts a couple of threads:
1278  */
1279 static void worker_process(int process_nr)
1280 {
1281         pthread_mutex_t process_lock;
1282         struct thread_data *td;
1283         pthread_t *pthreads;
1284         u8 *process_data;
1285         int task_nr;
1286         int ret;
1287         int t;
1288 
1289         pthread_mutex_init(&process_lock, NULL);
1290         set_taskname("process %d", process_nr);
1291 
1292         /*
1293          * Pick up the memory policy and the CPU binding of our first thread,
1294          * so that we initialize memory accordingly:
1295          */
1296         task_nr = process_nr*g->p.nr_threads;
1297         td = g->threads + task_nr;
1298 
1299         bind_to_memnode(td->bind_node);
1300         bind_to_cpumask(td->bind_cpumask);
1301 
1302         pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
1303         process_data = setup_private_data(g->p.bytes_process);
1304 
1305         if (g->p.show_details >= 3) {
1306                 printf(" # process %2d global mem: %p, process mem: %p\n",
1307                         process_nr, g->data, process_data);
1308         }
1309 
1310         for (t = 0; t < g->p.nr_threads; t++) {
1311                 task_nr = process_nr*g->p.nr_threads + t;
1312                 td = g->threads + task_nr;
1313 
1314                 td->process_data = process_data;
1315                 td->process_nr   = process_nr;
1316                 td->thread_nr    = t;
1317                 td->task_nr      = task_nr;
1318                 td->val          = rand();
1319                 td->curr_cpu     = -1;
1320                 td->process_lock = &process_lock;
1321 
1322                 ret = pthread_create(pthreads + t, NULL, worker_thread, td);
1323                 BUG_ON(ret);
1324         }
1325 
1326         for (t = 0; t < g->p.nr_threads; t++) {
1327                 ret = pthread_join(pthreads[t], NULL);
1328                 BUG_ON(ret);
1329         }
1330 
1331         free_data(process_data, g->p.bytes_process);
1332         free(pthreads);
1333 }
1334 
1335 static void print_summary(void)
1336 {
1337         if (g->p.show_details < 0)
1338                 return;
1339 
1340         printf("\n ###\n");
1341         printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
1342                 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
1343         printf(" #      %5dx %5ldMB global  shared mem operations\n",
1344                         g->p.nr_loops, g->p.bytes_global/1024/1024);
1345         printf(" #      %5dx %5ldMB process shared mem operations\n",
1346                         g->p.nr_loops, g->p.bytes_process/1024/1024);
1347         printf(" #      %5dx %5ldMB thread  local  mem operations\n",
1348                         g->p.nr_loops, g->p.bytes_thread/1024/1024);
1349 
1350         printf(" ###\n");
1351 
1352         printf("\n ###\n"); fflush(stdout);
1353 }
1354 
1355 static void init_thread_data(void)
1356 {
1357         ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
1358         int t;
1359 
1360         g->threads = zalloc_shared_data(size);
1361 
1362         for (t = 0; t < g->p.nr_tasks; t++) {
1363                 struct thread_data *td = g->threads + t;
1364                 int cpu;
1365 
1366                 /* Allow all nodes by default: */
1367                 td->bind_node = NUMA_NO_NODE;
1368 
1369                 /* Allow all CPUs by default: */
1370                 CPU_ZERO(&td->bind_cpumask);
1371                 for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
1372                         CPU_SET(cpu, &td->bind_cpumask);
1373         }
1374 }
1375 
1376 static void deinit_thread_data(void)
1377 {
1378         ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
1379 
1380         free_data(g->threads, size);
1381 }
1382 
1383 static int init(void)
1384 {
1385         g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0);
1386 
1387         /* Copy over options: */
1388         g->p = p0;
1389 
1390         g->p.nr_cpus = numa_num_configured_cpus();
1391 
1392         g->p.nr_nodes = numa_max_node() + 1;
1393 
1394         /* char array in count_process_nodes(): */
1395         BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0);
1396 
1397         if (g->p.show_quiet && !g->p.show_details)
1398                 g->p.show_details = -1;
1399 
1400         /* Some memory should be specified: */
1401         if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str)
1402                 return -1;
1403 
1404         if (g->p.mb_global_str) {
1405                 g->p.mb_global = atof(g->p.mb_global_str);
1406                 BUG_ON(g->p.mb_global < 0);
1407         }
1408 
1409         if (g->p.mb_proc_str) {
1410                 g->p.mb_proc = atof(g->p.mb_proc_str);
1411                 BUG_ON(g->p.mb_proc < 0);
1412         }
1413 
1414         if (g->p.mb_proc_locked_str) {
1415                 g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str);
1416                 BUG_ON(g->p.mb_proc_locked < 0);
1417                 BUG_ON(g->p.mb_proc_locked > g->p.mb_proc);
1418         }
1419 
1420         if (g->p.mb_thread_str) {
1421                 g->p.mb_thread = atof(g->p.mb_thread_str);
1422                 BUG_ON(g->p.mb_thread < 0);
1423         }
1424 
1425         BUG_ON(g->p.nr_threads <= 0);
1426         BUG_ON(g->p.nr_proc <= 0);
1427 
1428         g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;
1429 
1430         g->p.bytes_global               = g->p.mb_global        *1024L*1024L;
1431         g->p.bytes_process              = g->p.mb_proc          *1024L*1024L;
1432         g->p.bytes_process_locked       = g->p.mb_proc_locked   *1024L*1024L;
1433         g->p.bytes_thread               = g->p.mb_thread        *1024L*1024L;
1434 
1435         g->data = setup_shared_data(g->p.bytes_global);
1436 
1437         /* Startup serialization: */
1438         init_global_mutex(&g->start_work_mutex);
1439         init_global_mutex(&g->startup_mutex);
1440         init_global_mutex(&g->startup_done_mutex);
1441         init_global_mutex(&g->stop_work_mutex);
1442 
1443         init_thread_data();
1444 
1445         tprintf("#\n");
1446         if (parse_setup_cpu_list() || parse_setup_node_list())
1447                 return -1;
1448         tprintf("#\n");
1449 
1450         print_summary();
1451 
1452         return 0;
1453 }
1454 
1455 static void deinit(void)
1456 {
1457         free_data(g->data, g->p.bytes_global);
1458         g->data = NULL;
1459 
1460         deinit_thread_data();
1461 
1462         free_data(g, sizeof(*g));
1463         g = NULL;
1464 }
1465 
1466 /*
1467  * Print a short or long result, depending on the verbosity setting:
1468  */
1469 static void print_res(const char *name, double val,
1470                       const char *txt_unit, const char *txt_short, const char *txt_long)
1471 {
1472         if (!name)
1473                 name = "main,";
1474 
1475         if (!g->p.show_quiet)
1476                 printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
1477         else
1478                 printf(" %14.3f %s\n", val, txt_long);
1479 }
1480 
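     /*
      * Run one benchmark instance: fork the worker processes, optionally
      * serialize their startup, wait for them to finish and print the runtime
      * and bandwidth statistics:
      */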
1481 static int __bench_numa(const char *name)
1482 {
1483         struct timeval start, stop, diff;
1484         u64 runtime_ns_min, runtime_ns_sum;
1485         pid_t *pids, pid, wpid;
1486         double delta_runtime;
1487         double runtime_avg;
1488         double runtime_sec_max;
1489         double runtime_sec_min;
1490         int wait_stat;
1491         double bytes;
1492         int i, t, p;
1493 
1494         if (init())
1495                 return -1;
1496 
1497         pids = zalloc(g->p.nr_proc * sizeof(*pids));
1498         pid = -1;
1499 
 1500         /* All threads try to acquire it; this way we can wait for them to start up: */
1501         pthread_mutex_lock(&g->start_work_mutex);
1502 
1503         if (g->p.serialize_startup) {
1504                 tprintf(" #\n");
1505                 tprintf(" # Startup synchronization: ..."); fflush(stdout);
1506         }
1507 
1508         gettimeofday(&start, NULL);
1509 
1510         for (i = 0; i < g->p.nr_proc; i++) {
1511                 pid = fork();
1512                 dprintf(" # process %2d: PID %d\n", i, pid);
1513 
1514                 BUG_ON(pid < 0);
1515                 if (!pid) {
1516                         /* Child process: */
1517                         worker_process(i);
1518 
1519                         exit(0);
1520                 }
1521                 pids[i] = pid;
1522 
1523         }
1524         /* Wait for all the threads to start up: */
1525         while (g->nr_tasks_started != g->p.nr_tasks)
1526                 usleep(USEC_PER_MSEC);
1527 
1528         BUG_ON(g->nr_tasks_started != g->p.nr_tasks);
1529 
1530         if (g->p.serialize_startup) {
1531                 double startup_sec;
1532 
1533                 pthread_mutex_lock(&g->startup_done_mutex);
1534 
1535                 /* This will start all threads: */
1536                 pthread_mutex_unlock(&g->start_work_mutex);
1537 
1538                 /* This mutex is locked - the last started thread will wake us: */
1539                 pthread_mutex_lock(&g->startup_done_mutex);
1540 
1541                 gettimeofday(&stop, NULL);
1542 
1543                 timersub(&stop, &start, &diff);
1544 
1545                 startup_sec = diff.tv_sec * NSEC_PER_SEC;
1546                 startup_sec += diff.tv_usec * NSEC_PER_USEC;
1547                 startup_sec /= NSEC_PER_SEC;
1548 
1549                 tprintf(" threads initialized in %.6f seconds.\n", startup_sec);
1550                 tprintf(" #\n");
1551 
1552                 start = stop;
1553                 pthread_mutex_unlock(&g->startup_done_mutex);
1554         } else {
1555                 gettimeofday(&start, NULL);
1556         }
1557 
1558         /* Parent process: */
1559 
1560 
1561         for (i = 0; i < g->p.nr_proc; i++) {
1562                 wpid = waitpid(pids[i], &wait_stat, 0);
1563                 BUG_ON(wpid < 0);
1564                 BUG_ON(!WIFEXITED(wait_stat));
1565 
1566         }
1567 
1568         runtime_ns_sum = 0;
1569         runtime_ns_min = -1LL;
1570 
1571         for (t = 0; t < g->p.nr_tasks; t++) {
1572                 u64 thread_runtime_ns = g->threads[t].runtime_ns;
1573 
1574                 runtime_ns_sum += thread_runtime_ns;
1575                 runtime_ns_min = min(thread_runtime_ns, runtime_ns_min);
1576         }
1577 
1578         gettimeofday(&stop, NULL);
1579         timersub(&stop, &start, &diff);
1580 
1581         BUG_ON(bench_format != BENCH_FORMAT_DEFAULT);
1582 
1583         tprintf("\n ###\n");
1584         tprintf("\n");
1585 
1586         runtime_sec_max = diff.tv_sec * NSEC_PER_SEC;
1587         runtime_sec_max += diff.tv_usec * NSEC_PER_USEC;
1588         runtime_sec_max /= NSEC_PER_SEC;
1589 
1590         runtime_sec_min = (double)runtime_ns_min / NSEC_PER_SEC;
1591 
1592         bytes = g->bytes_done;
1593         runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC;
1594 
1595         if (g->p.measure_convergence) {
1596                 print_res(name, runtime_sec_max,
1597                         "secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge");
1598         }
1599 
1600         print_res(name, runtime_sec_max,
1601                 "secs,", "runtime-max/thread",  "secs slowest (max) thread-runtime");
1602 
1603         print_res(name, runtime_sec_min,
1604                 "secs,", "runtime-min/thread",  "secs fastest (min) thread-runtime");
1605 
1606         print_res(name, runtime_avg,
1607                 "secs,", "runtime-avg/thread",  "secs average thread-runtime");
1608 
1609         delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0;
1610         print_res(name, delta_runtime / runtime_sec_max * 100.0,
1611                 "%,", "spread-runtime/thread",  "% difference between max/avg runtime");
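        /* Illustrative values: max = 10 s, min = 8 s -> delta_runtime = 1 s
         * (roughly max - avg), so the reported spread is 1 / 10 * 100 = 10%. */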
1612 
1613         print_res(name, bytes / g->p.nr_tasks / 1e9,
1614                 "GB,", "data/thread",           "GB data processed, per thread");
1615 
1616         print_res(name, bytes / 1e9,
1617                 "GB,", "data-total",            "GB data processed, total");
1618 
1619         print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks),
1620                 "nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime");
1621 
1622         print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
1623                 "GB/sec,", "thread-speed",      "GB/sec/thread speed");
1624 
1625         print_res(name, bytes / runtime_sec_max / 1e9,
1626                 "GB/sec,", "total-speed",       "GB/sec total speed");
1627 
1628         if (g->p.show_details >= 2) {
1629                 char tname[14 + 2 * 10 + 1];
1630                 struct thread_data *td;
1631                 for (p = 0; p < g->p.nr_proc; p++) {
1632                         for (t = 0; t < g->p.nr_threads; t++) {
1633                                 memset(tname, 0, sizeof(tname));
1634                                 td = g->threads + p*g->p.nr_threads + t;
1635                                 snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
1636                                 print_res(tname, td->speed_gbs,
1637                                         "GB/sec",       "thread-speed", "GB/sec/thread speed");
1638                                 print_res(tname, td->system_time_ns / NSEC_PER_SEC,
1639                                         "secs", "thread-system-time", "system CPU time/thread");
1640                                 print_res(tname, td->user_time_ns / NSEC_PER_SEC,
1641                                         "secs", "thread-user-time", "user CPU time/thread");
1642                         }
1643                 }
1644         }
1645 
1646         free(pids);
1647 
1648         deinit();
1649 
1650         return 0;
1651 }
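/*
 * For illustration only: a minimal, standalone sketch (not part of numa.c) of
 * the two-mutex startup handshake used by __bench_numa() above, with plain
 * pthreads standing in for the forked worker processes.  All demo_* names and
 * the file name are made up.  Like the benchmark itself, the sketch leans on
 * the default (non-error-checking) NPTL mutex behaviour: the parent re-locks a
 * mutex it already holds, and a worker unlocks a mutex it did not lock.
 *
 * Build with something like:  cc -O2 -pthread startup_gate.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define DEMO_NR_THREADS 4

static pthread_mutex_t demo_start_work   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t demo_startup_done = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t demo_count_lock   = PTHREAD_MUTEX_INITIALIZER;
static int demo_nr_started;     /* protected by demo_count_lock */
static int demo_nr_running;     /* protected by demo_start_work */

static void *demo_worker(void *arg)
{
        /* Tell the parent this worker exists: */
        pthread_mutex_lock(&demo_count_lock);
        demo_nr_started++;
        pthread_mutex_unlock(&demo_count_lock);

        /* Block here until the parent opens the start gate: */
        pthread_mutex_lock(&demo_start_work);
        demo_nr_running++;
        /* The last worker through the gate wakes the parent: */
        if (demo_nr_running == DEMO_NR_THREADS)
                pthread_mutex_unlock(&demo_startup_done);
        pthread_mutex_unlock(&demo_start_work);

        printf("worker %ld past the start gate\n", (long)arg);
        return NULL;
}

int main(void)
{
        pthread_t tids[DEMO_NR_THREADS];
        int started;
        long i;

        /* Hold the start gate shut before any worker exists: */
        pthread_mutex_lock(&demo_start_work);

        for (i = 0; i < DEMO_NR_THREADS; i++)
                pthread_create(&tids[i], NULL, demo_worker, (void *)i);

        /* Poll until every worker has checked in (like nr_tasks_started): */
        do {
                usleep(1000);
                pthread_mutex_lock(&demo_count_lock);
                started = demo_nr_started;
                pthread_mutex_unlock(&demo_count_lock);
        } while (started != DEMO_NR_THREADS);

        /* Arm the completion gate, then release all workers at once: */
        pthread_mutex_lock(&demo_startup_done);
        pthread_mutex_unlock(&demo_start_work);

        /* Blocks until the last worker unlocks demo_startup_done: */
        pthread_mutex_lock(&demo_startup_done);
        pthread_mutex_unlock(&demo_startup_done);

        printf("all %d workers are running\n", DEMO_NR_THREADS);

        for (i = 0; i < DEMO_NR_THREADS; i++)
                pthread_join(tids[i], NULL);

        return 0;
}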
1652 
1653 #define MAX_ARGS 50
1654 
1655 static int command_size(const char **argv)
1656 {
1657         int size = 0;
1658 
1659         while (*argv) {
1660                 size++;
1661                 argv++;
1662         }
1663 
1664         BUG_ON(size >= MAX_ARGS);
1665 
1666         return size;
1667 }
1668 
1669 static void init_params(struct params *p, const char *name, int argc, const char **argv)
1670 {
1671         int i;
1672 
1673         printf("\n # Running %s \"perf bench numa", name);
1674 
1675         for (i = 0; i < argc; i++)
1676                 printf(" %s", argv[i]);
1677 
1678         printf("\"\n");
1679 
1680         memset(p, 0, sizeof(*p));
1681 
1682         /* Initialize nonzero defaults: */
1683 
1684         p->serialize_startup            = 1;
1685         p->data_reads                   = true;
1686         p->data_writes                  = true;
1687         p->data_backwards               = true;
1688         p->data_rand_walk               = true;
1689         p->nr_loops                     = -1;
1690         p->init_random                  = true;
1691         p->mb_global_str                = "1";
1692         p->nr_proc                      = 1;
1693         p->nr_threads                   = 1;
1694         p->nr_secs                      = 5;
1695         p->run_all                      = argc == 1;
1696 }
1697 
1698 static int run_bench_numa(const char *name, const char **argv)
1699 {
1700         int argc = command_size(argv);
1701 
1702         init_params(&p0, name, argc, argv);
1703         argc = parse_options(argc, argv, options, bench_numa_usage, 0);
1704         if (argc)
1705                 goto err;
1706 
1707         if (__bench_numa(name))
1708                 goto err;
1709 
1710         return 0;
1711 
1712 err:
1713         return -1;
1714 }
1715 
1716 #define OPT_BW_RAM              "-s",  "20", "-zZq",    "--thp", " 1", "--no-data_rand_walk"
1717 #define OPT_BW_RAM_NOTHP        OPT_BW_RAM,             "--thp", "-1"
1718 
1719 #define OPT_CONV                "-s", "100", "-zZ0qcm", "--thp", " 1"
1720 #define OPT_CONV_NOTHP          OPT_CONV,               "--thp", "-1"
1721 
1722 #define OPT_BW                  "-s",  "20", "-zZ0q",   "--thp", " 1"
1723 #define OPT_BW_NOTHP            OPT_BW,                 "--thp", "-1"
1724 
1725 /*
1726  * The built-in test-suite executed by "perf bench numa -a".
1727  *
1728  * (A minimum of 4 nodes and 16 GB of RAM is recommended.  An example expansion of one entry follows the table.)
1729  */
1730 static const char *tests[][MAX_ARGS] = {
1731    /* Basic single-stream NUMA bandwidth measurements: */
1732    { "RAM-bw-local,",     "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1733                           "-C" ,   "", "-M",   "", OPT_BW_RAM },
1734    { "RAM-bw-local-NOTHP,",
1735                           "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1736                           "-C" ,   "", "-M",   "", OPT_BW_RAM_NOTHP },
1737    { "RAM-bw-remote,",    "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1738                           "-C" ,   "", "-M",   "1", OPT_BW_RAM },
1739 
1740    /* 2-stream NUMA bandwidth measurements: */
1741    { "RAM-bw-local-2x,",  "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1742                            "-C", "0,2", "-M", "0x2", OPT_BW_RAM },
1743    { "RAM-bw-remote-2x,", "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1744                            "-C", "0,2", "-M", "1x2", OPT_BW_RAM },
1745 
1746    /* Cross-stream NUMA bandwidth measurement: */
1747    { "RAM-bw-cross,",     "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1748                            "-C", "0,8", "-M", "1,0", OPT_BW_RAM },
1749 
1750    /* Convergence latency measurements: */
1751    { " 1x3-convergence,", "mem",  "-p",  "1", "-t",  "3", "-P",  "512", OPT_CONV },
1752    { " 1x4-convergence,", "mem",  "-p",  "1", "-t",  "4", "-P",  "512", OPT_CONV },
1753    { " 1x6-convergence,", "mem",  "-p",  "1", "-t",  "6", "-P", "1020", OPT_CONV },
1754    { " 2x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
1755    { " 3x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
1756    { " 4x4-convergence,", "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV },
1757    { " 4x4-convergence-NOTHP,",
1758                           "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
1759    { " 4x6-convergence,", "mem",  "-p",  "4", "-t",  "6", "-P", "1020", OPT_CONV },
1760    { " 4x8-convergence,", "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_CONV },
1761    { " 8x4-convergence,", "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV },
1762    { " 8x4-convergence-NOTHP,",
1763                           "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
1764    { " 3x1-convergence,", "mem",  "-p",  "3", "-t",  "1", "-P",  "512", OPT_CONV },
1765    { " 4x1-convergence,", "mem",  "-p",  "4", "-t",  "1", "-P",  "512", OPT_CONV },
1766    { " 8x1-convergence,", "mem",  "-p",  "8", "-t",  "1", "-P",  "512", OPT_CONV },
1767    { "16x1-convergence,", "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_CONV },
1768    { "32x1-convergence,", "mem",  "-p", "32", "-t",  "1", "-P",  "128", OPT_CONV },
1769 
1770    /* Various NUMA process/thread layout bandwidth measurements: */
1771    { " 2x1-bw-process,",  "mem",  "-p",  "2", "-t",  "1", "-P", "1024", OPT_BW },
1772    { " 3x1-bw-process,",  "mem",  "-p",  "3", "-t",  "1", "-P", "1024", OPT_BW },
1773    { " 4x1-bw-process,",  "mem",  "-p",  "4", "-t",  "1", "-P", "1024", OPT_BW },
1774    { " 8x1-bw-process,",  "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW },
1775    { " 8x1-bw-process-NOTHP,",
1776                           "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW_NOTHP },
1777    { "16x1-bw-process,",  "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_BW },
1778 
1779    { " 4x1-bw-thread,",   "mem",  "-p",  "1", "-t",  "4", "-T",  "256", OPT_BW },
1780    { " 8x1-bw-thread,",   "mem",  "-p",  "1", "-t",  "8", "-T",  "256", OPT_BW },
1781    { "16x1-bw-thread,",   "mem",  "-p",  "1", "-t", "16", "-T",  "128", OPT_BW },
1782    { "32x1-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-T",   "64", OPT_BW },
1783 
1784    { " 2x3-bw-thread,",   "mem",  "-p",  "2", "-t",  "3", "-P",  "512", OPT_BW },
1785    { " 4x4-bw-thread,",   "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_BW },
1786    { " 4x6-bw-thread,",   "mem",  "-p",  "4", "-t",  "6", "-P",  "512", OPT_BW },
1787    { " 4x8-bw-thread,",   "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW },
1788    { " 4x8-bw-thread-NOTHP,",
1789                           "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW_NOTHP },
1790    { " 3x3-bw-thread,",   "mem",  "-p",  "3", "-t",  "3", "-P",  "512", OPT_BW },
1791    { " 5x5-bw-thread,",   "mem",  "-p",  "5", "-t",  "5", "-P",  "512", OPT_BW },
1792 
1793    { "2x16-bw-thread,",   "mem",  "-p",  "2", "-t", "16", "-P",  "512", OPT_BW },
1794    { "1x32-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-P", "2048", OPT_BW },
1795 
1796    { "numa02-bw,",        "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW },
1797    { "numa02-bw-NOTHP,",  "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW_NOTHP },
1798    { "numa01-bw-thread,", "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW },
1799    { "numa01-bw-thread-NOTHP,",
1800                           "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW_NOTHP },
1801 };
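/*
 * For illustration only (not part of the table above): the first entry,
 * "RAM-bw-local,", combined with the OPT_BW_RAM macro is roughly equivalent
 * to running:
 *
 *   perf bench numa mem -p 1 -t 1 -P 1024 -C '' -M '' \
 *           -s 20 -zZq --thp 1 --no-data_rand_walk
 *
 * bench_all() below hands each row to run_bench_numa(), with tests[i][0] as
 * the display name and tests[i] + 1 as the argument vector passed on to
 * parse_options().
 */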
1802 
1803 static int bench_all(void)
1804 {
1805         int nr = ARRAY_SIZE(tests);
1806         int ret;
1807         int i;
1808 
1809         ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'");
1810         BUG_ON(ret < 0);
1811 
1812         for (i = 0; i < nr; i++) {
1813                 run_bench_numa(tests[i][0], tests[i] + 1);
1814         }
1815 
1816         printf("\n");
1817 
1818         return 0;
1819 }
1820 
1821 int bench_numa(int argc, const char **argv)
1822 {
1823         init_params(&p0, "main,", argc, argv);
1824         argc = parse_options(argc, argv, options, bench_numa_usage, 0);
1825         if (argc)
1826                 goto err;
1827 
1828         if (p0.run_all)
1829                 return bench_all();
1830 
1831         if (__bench_numa(NULL))
1832                 goto err;
1833 
1834         return 0;
1835 
1836 err:
1837         usage_with_options(numa_usage, options);
1838         return -1;
1839 }
1840 
