
TOMOYO Linux Cross Reference
Linux/kernel/events/ring_buffer.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
        atomic_set(&handle->rb->poll, EPOLLIN);

        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However, since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;

        preempt_disable();
        local_inc(&rb->nest);
        handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long head;

again:
        head = local_read(&rb->head);

        /*
         * IRQ/NMI can happen here, which means we can miss a head update.
         */

        if (!local_dec_and_test(&rb->nest))
                goto out;

        /*
         * Since the mmap() consumer (userspace) can run on a different CPU:
         *
         *   kernel                             user
         *
         *   if (LOAD ->data_tail) {            LOAD ->data_head
         *                      (A)             smp_rmb()       (C)
         *      STORE $data                     LOAD $data
         *      smp_wmb()       (B)             smp_mb()        (D)
         *      STORE ->data_head               STORE ->data_tail
         *   }
         *
         * Where A pairs with D, and B pairs with C.
         *
         * In our case (A) is a control dependency that separates the load
         * of ->data_tail from the stores of $data: if ->data_tail indicates
         * there is no room in the buffer to store $data, we do not store it.
         *
         * D needs to be a full barrier since it separates the data READ
         * from the tail WRITE.
         *
         * For B a WMB is sufficient since it separates two WRITEs, and for C
         * an RMB is sufficient since it separates two READs.
         *
         * See perf_output_begin().
         */
        smp_wmb(); /* B, matches C */
        rb->user_page->data_head = head;

        /*
         * Now check if we missed an update -- rely on previous implied
         * compiler barriers to force a re-read.
         */
        if (unlikely(head != local_read(&rb->head))) {
                local_inc(&rb->nest);
                goto again;
        }

        if (handle->wakeup != local_read(&rb->wakeup))
                perf_output_wakeup(handle);

out:
        preempt_enable();
}
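
The user half of the diagram above is the mmap() consumer's job. As a minimal userspace sketch (read_ring and its parameters are illustrative, not part of this file; it assumes the buffer is mapped read-write and data_size is a power of two), the C and D barriers map onto C11 fences:

#include <linux/perf_event.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative consumer loop: @up is the mmap'ed user page, @data the
 * start of the data pages, @data_size a power of two. Copies at most
 * @out_len bytes into @out and publishes the new tail.
 */
static size_t read_ring(struct perf_event_mmap_page *up, unsigned char *data,
                        uint64_t data_size, unsigned char *out, size_t out_len)
{
        uint64_t head, tail;
        size_t n = 0;

        head = up->data_head;                           /* LOAD ->data_head */
        atomic_thread_fence(memory_order_acquire);      /* C, matches B */

        tail = up->data_tail;
        while (tail != head && n < out_len) {           /* LOAD $data */
                out[n++] = data[tail & (data_size - 1)];
                tail++;
        }

        atomic_thread_fence(memory_order_seq_cst);      /* D, matches A */
        up->data_tail = tail;                           /* STORE ->data_tail */

        return n;
}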

static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
                      unsigned long data_size, unsigned int size,
                      bool backward)
{
        if (!backward)
                return CIRC_SPACE(head, tail, data_size) >= size;
        else
                return CIRC_SPACE(tail, head, data_size) >= size;
}

static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
                    struct perf_event *event, unsigned int size,
                    bool backward)
{
        struct ring_buffer *rb;
        unsigned long tail, offset, head;
        int have_lost, page_shift;
        struct {
                struct perf_event_header header;
                u64                      id;
                u64                      lost;
        } lost_event;

        rcu_read_lock();
        /*
         * For inherited events we send all the output towards the parent.
         */
        if (event->parent)
                event = event->parent;

        rb = rcu_dereference(event->rb);
        if (unlikely(!rb))
                goto out;

        if (unlikely(rb->paused)) {
                if (rb->nr_pages)
                        local_inc(&rb->lost);
                goto out;
        }

        handle->rb    = rb;
        handle->event = event;

        have_lost = local_read(&rb->lost);
        if (unlikely(have_lost)) {
                size += sizeof(lost_event);
                if (event->attr.sample_id_all)
                        size += event->id_header_size;
        }

        perf_output_get_handle(handle);

        do {
                tail = READ_ONCE(rb->user_page->data_tail);
                offset = head = local_read(&rb->head);
                if (!rb->overwrite) {
                        if (unlikely(!ring_buffer_has_space(head, tail,
                                                            perf_data_size(rb),
                                                            size, backward)))
                                goto fail;
                }

                /*
                 * The above forms a control dependency barrier separating
                 * the @tail load above from the data stores below, since
                 * the @tail load is required to compute the branch to the
                 * fail label below.
                 *
                 * A, matches D; the full memory barrier userspace SHOULD
                 * issue after reading the data and before storing the new
                 * tail position.
                 *
                 * See perf_output_put_handle().
                 */

                if (!backward)
                        head += size;
                else
                        head -= size;
        } while (local_cmpxchg(&rb->head, offset, head) != offset);

        if (backward) {
                offset = head;
                head = (u64)(-head);
        }

        /*
         * We rely on the implied barrier() by local_cmpxchg() to ensure
         * none of the data stores below can be lifted up by the compiler.
         */

        if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
                local_add(rb->watermark, &rb->wakeup);

        page_shift = PAGE_SHIFT + page_order(rb);

        handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
        offset &= (1UL << page_shift) - 1;
        handle->addr = rb->data_pages[handle->page] + offset;
        handle->size = (1UL << page_shift) - offset;

        if (unlikely(have_lost)) {
                struct perf_sample_data sample_data;

                lost_event.header.size = sizeof(lost_event);
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
                lost_event.id          = event->id;
                lost_event.lost        = local_xchg(&rb->lost, 0);

                perf_event_header__init_id(&lost_event.header,
                                           &sample_data, event);
                perf_output_put(handle, lost_event);
                perf_event__output_id_sample(event, handle, &sample_data);
        }

        return 0;

fail:
        local_inc(&rb->lost);
        perf_output_put_handle(handle);
out:
        rcu_read_unlock();

        return -ENOSPC;
}

int perf_output_begin_forward(struct perf_output_handle *handle,
                              struct perf_event *event, unsigned int size)
{
        return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
                               struct perf_event *event, unsigned int size)
{
        return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size)
{
        return __perf_output_begin(handle, event, size,
                                   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
                              const void *buf, unsigned int len)
{
        return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
                              unsigned int len)
{
        return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
        perf_output_put_handle(handle);
        rcu_read_unlock();
}
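
Taken together, these entry points form the write-side API: reserve space, copy the record, commit. A hedged sketch of a caller (example_emit_record is hypothetical; perf_output_put() is the copy helper from internal.h that this file itself uses above):

#include <linux/perf_event.h>

/* Hypothetical caller: reserve space, copy a header-only record, commit. */
static void example_emit_record(struct perf_event *event)
{
        struct perf_output_handle handle;
        struct perf_event_header header = {
                .type = PERF_RECORD_SAMPLE,     /* illustrative record type */
                .misc = 0,
                .size = sizeof(header),
        };

        /* Returns -ENOSPC when the buffer has no room for @size bytes. */
        if (perf_output_begin(&handle, event, header.size))
                return;

        perf_output_put(&handle, header);
        perf_output_end(&handle);
}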

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
        long max_size = perf_data_size(rb);

        if (watermark)
                rb->watermark = min(max_size, watermark);

        if (!rb->watermark)
                rb->watermark = max_size / 2;

        if (flags & RING_BUFFER_WRITABLE)
                rb->overwrite = 0;
        else
                rb->overwrite = 1;

        refcount_set(&rb->refcount, 1);

        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);

        /*
         * perf_output_begin() only checks rb->paused, therefore
         * rb->paused must be true if we have no pages for output.
         */
        if (!rb->nr_pages)
                rb->paused = 1;
}

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
        /*
         * OVERWRITE is determined by perf_aux_output_end() and can't
         * be passed in directly.
         */
        if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
                return;

        handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
                            struct perf_event *event)
{
        struct perf_event *output_event = event;
        unsigned long aux_head, aux_tail;
        struct ring_buffer *rb;

        if (output_event->parent)
                output_event = output_event->parent;

        /*
         * Since this will typically be open across pmu::add/pmu::del, we
         * grab ring_buffer's refcount instead of holding rcu read lock
         * to make sure it doesn't disappear under us.
         */
        rb = ring_buffer_get(output_event);
        if (!rb)
                return NULL;

        if (!rb_has_aux(rb))
                goto err;

        /*
         * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
         * about to get freed, so we leave immediately.
         *
         * Checking rb::aux_mmap_count and rb::refcount has to be done in
         * the same order, see perf_mmap_close. Otherwise we end up freeing
         * aux pages in this path, which is a bug, because in_atomic().
         */
        if (!atomic_read(&rb->aux_mmap_count))
                goto err;

        if (!refcount_inc_not_zero(&rb->aux_refcount))
                goto err;

        /*
         * Nesting is not supported for the AUX area; make sure nested
         * writers are caught early.
         */
        if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
                goto err_put;

        aux_head = rb->aux_head;

        handle->rb = rb;
        handle->event = event;
        handle->head = aux_head;
        handle->size = 0;
        handle->aux_flags = 0;

        /*
         * In overwrite mode, AUX data stores do not depend on aux_tail,
         * therefore (A) control dependency barrier does not exist. The
         * (B) <-> (C) ordering is still observed by the pmu driver.
         */
        if (!rb->aux_overwrite) {
                aux_tail = READ_ONCE(rb->user_page->aux_tail);
                handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
                if (aux_head - aux_tail < perf_aux_size(rb))
                        handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

                /*
                 * handle->size computation depends on aux_tail load; this
                 * forms a control dependency barrier separating aux_tail
                 * load from aux data store that will be enabled on
                 * successful return.
                 */
                if (!handle->size) { /* A, matches D */
                        event->pending_disable = 1;
                        perf_output_wakeup(handle);
                        local_set(&rb->aux_nest, 0);
                        goto err_put;
                }
        }

        return handle->rb->aux_priv;

err_put:
        /* can't be last */
        rb_free_aux(rb);

err:
        ring_buffer_put(rb);
        handle->event = NULL;

        return NULL;
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);

static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
{
        if (rb->aux_overwrite)
                return false;

        if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
                rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
                return true;
        }

        return false;
}

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
        bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
        struct ring_buffer *rb = handle->rb;
        unsigned long aux_head;

        /* in overwrite mode, driver provides aux_head via handle */
        if (rb->aux_overwrite) {
                handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

                aux_head = handle->head;
                rb->aux_head = aux_head;
        } else {
                handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

                aux_head = rb->aux_head;
                rb->aux_head += size;
        }

        if (size || handle->aux_flags) {
                /*
                 * Only send RECORD_AUX if we have something useful to
                 * communicate.
                 *
                 * Note: the OVERWRITE records by themselves are not
                 * considered useful, as they don't communicate any *new*
                 * information, aside from the short-lived offset, which
                 * becomes history at the next event sched-in and therefore
                 * isn't useful. Userspace that needs to copy out AUX data
                 * in overwrite mode should know to use user_page::aux_head
                 * for the actual offset. So, from now on we don't output
                 * AUX records that have *only* the OVERWRITE flag set.
                 */
                if (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE)
                        perf_event_aux_event(handle->event, aux_head, size,
                                             handle->aux_flags);
        }

        rb->user_page->aux_head = rb->aux_head;
        if (rb_need_aux_wakeup(rb))
                wakeup = true;

        if (wakeup) {
                if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
                        handle->event->pending_disable = 1;
                perf_output_wakeup(handle);
        }

        handle->event = NULL;

        local_set(&rb->aux_nest, 0);
        /* can't be last */
        rb_free_aux(rb);
        ring_buffer_put(rb);
}
EXPORT_SYMBOL_GPL(perf_aux_output_end);
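
As a hedged illustration of the calling convention described above (all ex_-prefixed names are hypothetical, standing in for a real PMU driver's state and hardware accessors):

#include <linux/perf_event.h>
#include <linux/percpu-defs.h>

static DEFINE_PER_CPU(struct perf_output_handle, ex_handle);

/* pmu::start(): obtain the handle before hardware starts writing */
static void ex_pmu_start(struct perf_event *event, int flags)
{
        struct perf_output_handle *handle = this_cpu_ptr(&ex_handle);
        void *buf;

        buf = perf_aux_output_begin(handle, event);
        if (!buf)
                return; /* no AUX buffer mapped, or no room: stay stopped */

        /*
         * Program hardware to write at most handle->size bytes into the
         * area described by buf (the pmu's aux_priv from setup_aux()).
         */
}

/* pmu::stop(): commit what the hardware wrote and drop the AUX ref */
static void ex_pmu_stop(struct perf_event *event, int flags)
{
        struct perf_output_handle *handle = this_cpu_ptr(&ex_handle);
        unsigned long size = 0; /* bytes written, read back from hardware */

        /* ordering rule (B): make hardware writes visible before this */
        perf_aux_output_end(handle, size);
}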

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
        struct ring_buffer *rb = handle->rb;

        if (size > handle->size)
                return -ENOSPC;

        rb->aux_head += size;

        rb->user_page->aux_head = rb->aux_head;
        if (rb_need_aux_wakeup(rb)) {
                perf_output_wakeup(handle);
                handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
        }

        handle->head = rb->aux_head;
        handle->size -= size;

        return 0;
}
EXPORT_SYMBOL_GPL(perf_aux_output_skip);

void *perf_get_aux(struct perf_output_handle *handle)
{
        /* this is only valid between perf_aux_output_begin and *_end */
        if (!handle->event)
                return NULL;

        return handle->rb->aux_priv;
}
EXPORT_SYMBOL_GPL(perf_get_aux);

#define PERF_AUX_GFP    (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
        struct page *page;

        if (order > MAX_ORDER)
                order = MAX_ORDER;

        do {
                page = alloc_pages_node(node, PERF_AUX_GFP, order);
        } while (!page && order--);

        if (page && order) {
                /*
                 * Communicate the allocation size to the driver:
                 * if we managed to secure a high-order allocation,
                 * set its first page's private to this order;
                 * !PagePrivate(page) means it's just a normal page.
                 */
                split_page(page, order);
                SetPagePrivate(page);
                set_page_private(page, order);
        }

        return page;
}
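
On the consuming side of that convention, a driver walking the aux_pages array can recover the chunk size like this (a sketch; ex_aux_chunk_pages is not part of this file):

#include <linux/mm.h>

/* Number of pages in the contiguous chunk that starts at @page. */
static long ex_aux_chunk_pages(struct page *page)
{
        /* first subpage of a high-order chunk carries the order */
        if (PagePrivate(page))
                return 1L << page_private(page);

        return 1; /* plain order-0 page */
}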

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
        struct page *page = virt_to_page(rb->aux_pages[idx]);

        ClearPagePrivate(page);
        page->mapping = NULL;
        __free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
        int pg;

        /*
         * Should never happen, the last reference should be dropped from
         * perf_mmap_close() path, which first stops aux transactions (which
         * in turn are the atomic holders of aux_refcount) and then does the
         * last rb_free_aux().
         */
        WARN_ON_ONCE(in_atomic());

        if (rb->aux_priv) {
                rb->free_aux(rb->aux_priv);
                rb->free_aux = NULL;
                rb->aux_priv = NULL;
        }

        if (rb->aux_nr_pages) {
                for (pg = 0; pg < rb->aux_nr_pages; pg++)
                        rb_free_aux_page(rb, pg);

                kfree(rb->aux_pages);
                rb->aux_nr_pages = 0;
        }
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
        bool overwrite = !(flags & RING_BUFFER_WRITABLE);
        int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
        int ret = -ENOMEM, max_order;

        if (!has_aux(event))
                return -EOPNOTSUPP;

        /*
         * We need to start with the max_order that fits in nr_pages,
         * not the other way around, hence ilog2() and not get_order():
         * e.g. for nr_pages == 48, ilog2() gives 5 (a 32-page chunk
         * fits), while get_order() would round up to 6.
         */
        max_order = ilog2(nr_pages);

        /*
         * The PMU requests more than one contiguous chunk of memory
         * for SW double buffering.
         */
        if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
            !overwrite) {
                if (!max_order)
                        return -EINVAL;

                max_order--;
        }

        rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
                                     node);
        if (!rb->aux_pages)
                return -ENOMEM;

        rb->free_aux = event->pmu->free_aux;
        for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
                struct page *page;
                int last, order;

                order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
                page = rb_alloc_aux_page(node, order);
                if (!page)
                        goto out;

                for (last = rb->aux_nr_pages + (1 << page_private(page));
                     last > rb->aux_nr_pages; rb->aux_nr_pages++)
                        rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
        }

        /*
         * In overwrite mode, PMUs that don't support SG may not handle more
         * than one contiguous allocation, since they rely on PMI to do double
         * buffering. In this case, the entire buffer has to be one contiguous
         * chunk.
         */
        if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
            overwrite) {
                struct page *page = virt_to_page(rb->aux_pages[0]);

                if (page_private(page) != max_order)
                        goto out;
        }

        rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
                                             overwrite);
        if (!rb->aux_priv)
                goto out;

        ret = 0;

        /*
         * aux_pages (and the pmu driver's private data, aux_priv) will be
         * referenced in both producer's and consumer's contexts, thus
         * we keep a refcount here to make sure either of the two can
         * reference them safely.
         */
        refcount_set(&rb->aux_refcount, 1);

        rb->aux_overwrite = overwrite;
        rb->aux_watermark = watermark;

        if (!rb->aux_watermark && !rb->aux_overwrite)
                rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
        if (!ret)
                rb->aux_pgoff = pgoff;
        else
                __rb_free_aux(rb);

        return ret;
}
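
The chunking loop above is easier to see with numbers. A standalone mock of the same math (userspace C, not kernel code; ilog2_floor reimplements the kernel's ilog2() for illustration):

#include <stdio.h>

/* floor(log2(n)) for n > 0, mirroring the kernel's ilog2() */
static int ilog2_floor(unsigned long n)
{
        int r = -1;

        while (n) {
                n >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        int nr_pages = 48;                      /* example AUX buffer size */
        int max_order = ilog2_floor(nr_pages);  /* 5: a 32-page chunk fits */
        int done = 0;

        while (done < nr_pages) {
                int rest = ilog2_floor(nr_pages - done);
                int order = max_order < rest ? max_order : rest;

                printf("chunk: %d pages\n", 1 << order);
                done += 1 << order;
        }
        return 0;       /* prints chunks of 32 and 16 for nr_pages = 48 */
}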

void rb_free_aux(struct ring_buffer *rb)
{
        if (refcount_dec_and_test(&rb->aux_refcount))
                __rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular, order-0 GFP_KERNEL pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (pgoff > rb->nr_pages)
                return NULL;

        if (pgoff == 0)
                return virt_to_page(rb->user_page);

        return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
        struct page *page;
        int node;

        node = (cpu == -1) ? cpu : cpu_to_node(cpu);
        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
        if (!page)
                return NULL;

        return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        int i;

        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);

        if (order_base_2(size) >= PAGE_SHIFT + MAX_ORDER)
                goto fail;

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        rb->user_page = perf_mmap_alloc_page(cpu);
        if (!rb->user_page)
                goto fail_user_page;

        for (i = 0; i < nr_pages; i++) {
                rb->data_pages[i] = perf_mmap_alloc_page(cpu);
                if (!rb->data_pages[i])
                        goto fail_data_pages;
        }

        rb->nr_pages = nr_pages;

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_data_pages:
        for (i--; i >= 0; i--)
                free_page((unsigned long)rb->data_pages[i]);

        free_page((unsigned long)rb->user_page);

fail_user_page:
        kfree(rb);

fail:
        return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
        struct page *page = virt_to_page((void *)addr);

        page->mapping = NULL;
        __free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
        int i;

        perf_mmap_free_page((unsigned long)rb->user_page);
        for (i = 0; i < rb->nr_pages; i++)
                perf_mmap_free_page((unsigned long)rb->data_pages[i]);
        kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
        return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        /* The '>' (rather than '>=') accounts for the extra user page. */
        if (pgoff > data_page_nr(rb))
                return NULL;

        return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
        struct page *page = vmalloc_to_page(addr);

        page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
        struct ring_buffer *rb;
        void *base;
        int i, nr;

        rb = container_of(work, struct ring_buffer, work);
        nr = data_page_nr(rb);

        base = rb->user_page;
        /* The '<=' (rather than '<') accounts for the extra user page. */
        for (i = 0; i <= nr; i++)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));

        vfree(base);
        kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
        schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        void *all_buf;

        size = sizeof(struct ring_buffer);
        size += sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        INIT_WORK(&rb->work, rb_free_work);

        all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
        if (!all_buf)
                goto fail_all_buf;

        rb->user_page = all_buf;
        rb->data_pages[0] = all_buf + PAGE_SIZE;
        if (nr_pages) {
                rb->nr_pages = 1;
                rb->page_order = ilog2(nr_pages);
        }

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_all_buf:
        kfree(rb);

fail:
        return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (rb->aux_nr_pages) {
                /* above AUX space */
                if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
                        return NULL;

                /* AUX space */
                if (pgoff >= rb->aux_pgoff) {
                        int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
                        return virt_to_page(rb->aux_pages[aux_pgoff]);
                }
        }

        return __perf_mmap_to_page(rb, pgoff);
}

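perf_mmap_to_page() ultimately backs both regions that userspace maps over the event fd. A hedged sketch of the consumer side (map_aux_area is illustrative; fd comes from perf_event_open() and up is the already-mapped user page), which registers the AUX region right after the data pages before mapping it:

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <stddef.h>

/* Illustrative only: map the AUX area described by the user page. */
static void *map_aux_area(int fd, struct perf_event_mmap_page *up,
                          size_t aux_size)
{
        /* tell the kernel where the AUX region will live in the file */
        up->aux_offset = up->data_offset + up->data_size;
        up->aux_size   = aux_size;  /* assumption: power-of-two page count */

        return mmap(NULL, aux_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, (off_t)up->aux_offset);
}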
