
TOMOYO Linux Cross Reference
Linux/kernel/events/ring_buffer.c


/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
        atomic_set(&handle->rb->poll, POLLIN);

        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;

        preempt_disable();
        local_inc(&rb->nest);
        handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long head;

again:
        head = local_read(&rb->head);

        /*
         * IRQ/NMI can happen here, which means we can miss a head update.
         */

        if (!local_dec_and_test(&rb->nest))
                goto out;

        /*
         * Since the mmap() consumer (userspace) can run on a different CPU:
         *
         *   kernel                             user
         *
         *   if (LOAD ->data_tail) {            LOAD ->data_head
         *                      (A)             smp_rmb()       (C)
         *      STORE $data                     LOAD $data
         *      smp_wmb()       (B)             smp_mb()        (D)
         *      STORE ->data_head               STORE ->data_tail
         *   }
         *
         * Where A pairs with D, and B pairs with C.
         *
         * In our case (A) is a control dependency that separates the load
         * of ->data_tail from the stores of $data: if ->data_tail
         * indicates there is no room in the buffer, we do not store $data.
         *
         * D needs to be a full barrier since it separates the data READ
         * from the tail WRITE.
         *
         * For B a WMB is sufficient since it separates two WRITEs, and for C
         * an RMB is sufficient since it separates two READs.
         *
         * See perf_output_begin().
         */
        smp_wmb(); /* B, matches C */
        rb->user_page->data_head = head;

        /*
         * Now check if we missed an update -- rely on previous implied
         * compiler barriers to force a re-read.
         */
        if (unlikely(head != local_read(&rb->head))) {
                local_inc(&rb->nest);
                goto again;
        }

        if (handle->wakeup != local_read(&rb->wakeup))
                perf_output_wakeup(handle);

out:
        preempt_enable();
}
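
/*
 * A minimal sketch (not part of the kernel proper, kept out of compilation)
 * of the matching userspace consumer loop for the A/B/C/D diagram above.
 * The buffer header is the uapi struct perf_event_mmap_page; rmb()/mb()
 * stand in for the architecture's userspace read/full barriers, and
 * mapped_base, data_area, mask and consume_record() are hypothetical
 * placeholders. Records wrapping the buffer edge are ignored for brevity.
 */
#if 0
        struct perf_event_mmap_page *up = mapped_base;  /* page 0 of the mmap */
        __u64 head, tail = up->data_tail;

        head = up->data_head;                   /* LOAD ->data_head   (C) */
        rmb();                                  /* matches kernel B       */
        while (tail != head) {
                struct perf_event_header *hdr;

                hdr = data_area + (tail & mask); /* LOAD $data            */
                consume_record(hdr);
                tail += hdr->size;              /* records carry their size */
        }
        mb();                                   /* (D), matches kernel A  */
        up->data_tail = tail;                   /* STORE ->data_tail      */
#endif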

int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size)
{
        struct ring_buffer *rb;
        unsigned long tail, offset, head;
        int have_lost, page_shift;
        struct {
                struct perf_event_header header;
                u64                      id;
                u64                      lost;
        } lost_event;

        rcu_read_lock();
        /*
         * For inherited events we send all the output towards the parent.
         */
        if (event->parent)
                event = event->parent;

        rb = rcu_dereference(event->rb);
        if (unlikely(!rb))
                goto out;

        if (unlikely(!rb->nr_pages))
                goto out;

        handle->rb    = rb;
        handle->event = event;

        have_lost = local_read(&rb->lost);
        if (unlikely(have_lost)) {
                size += sizeof(lost_event);
                if (event->attr.sample_id_all)
                        size += event->id_header_size;
        }

        perf_output_get_handle(handle);

        do {
                tail = READ_ONCE(rb->user_page->data_tail);
                offset = head = local_read(&rb->head);
                if (!rb->overwrite &&
                    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
                        goto fail;
                /*
                 * The above forms a control dependency barrier separating
                 * the @tail load above from the data stores below, since
                 * the @tail load is required to compute the branch to fail
                 * below.
                 *
                 * This is A, and matches D: the full memory barrier
                 * userspace SHOULD issue after reading the data and before
                 * storing the new tail position.
                 *
                 * See perf_output_put_handle().
                 */

                head += size;
        } while (local_cmpxchg(&rb->head, offset, head) != offset);

        /*
         * We rely on the implied barrier() by local_cmpxchg() to ensure
         * none of the data stores below can be lifted up by the compiler.
         */

        if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
                local_add(rb->watermark, &rb->wakeup);

        page_shift = PAGE_SHIFT + page_order(rb);

        handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
        offset &= (1UL << page_shift) - 1;
        handle->addr = rb->data_pages[handle->page] + offset;
        handle->size = (1UL << page_shift) - offset;

        if (unlikely(have_lost)) {
                struct perf_sample_data sample_data;

                lost_event.header.size = sizeof(lost_event);
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
                lost_event.id          = event->id;
                lost_event.lost        = local_xchg(&rb->lost, 0);

                perf_event_header__init_id(&lost_event.header,
                                           &sample_data, event);
                perf_output_put(handle, lost_event);
                perf_event__output_id_sample(event, handle, &sample_data);
        }

        return 0;

fail:
        local_inc(&rb->lost);
        perf_output_put_handle(handle);
out:
        rcu_read_unlock();

        return -ENOSPC;
}
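
/*
 * A minimal sketch (not compiled) of the begin/copy/end pattern that record
 * emitters use with the functions above; "payload" and "payload_len" are
 * hypothetical, and the size passed to perf_output_begin() must cover every
 * byte subsequently written through the handle.
 */
#if 0
        struct perf_output_handle handle;
        struct perf_event_header header;

        header.type = PERF_RECORD_SAMPLE;
        header.misc = 0;
        header.size = sizeof(header) + payload_len;

        if (perf_output_begin(&handle, event, header.size))
                return;                         /* no space: rb->lost was bumped */

        perf_output_put(&handle, header);                /* copies the header  */
        perf_output_copy(&handle, payload, payload_len); /* copies the payload */
        perf_output_end(&handle);       /* publishes head, may wake the reader */
#endif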

unsigned int perf_output_copy(struct perf_output_handle *handle,
                      const void *buf, unsigned int len)
{
        return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
                              unsigned int len)
{
        return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
        perf_output_put_handle(handle);
        rcu_read_unlock();
}

static void rb_irq_work(struct irq_work *work);

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
        long max_size = perf_data_size(rb);

        if (watermark)
                rb->watermark = min(max_size, watermark);

        if (!rb->watermark)
                rb->watermark = max_size / 2;

        if (flags & RING_BUFFER_WRITABLE)
                rb->overwrite = 0;
        else
                rb->overwrite = 1;

        atomic_set(&rb->refcount, 1);

        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);
        init_irq_work(&rb->irq_work, rb_irq_work);
}

static void ring_buffer_put_async(struct ring_buffer *rb)
{
        if (!atomic_dec_and_test(&rb->refcount))
                return;

        rb->rcu_head.next = (void *)rb;
        irq_work_queue(&rb->irq_work);
}

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
                            struct perf_event *event)
{
        struct perf_event *output_event = event;
        unsigned long aux_head, aux_tail;
        struct ring_buffer *rb;

        if (output_event->parent)
                output_event = output_event->parent;

        /*
         * Since this will typically be open across pmu::add/pmu::del, we
         * grab ring_buffer's refcount instead of holding rcu read lock
         * to make sure it doesn't disappear under us.
         */
        rb = ring_buffer_get(output_event);
        if (!rb)
                return NULL;

        if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
                goto err;

        /*
         * Nesting is not supported for the AUX area; make sure nested
         * writers are caught early.
         */
        if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
                goto err_put;

        aux_head = local_read(&rb->aux_head);

        handle->rb = rb;
        handle->event = event;
        handle->head = aux_head;
        handle->size = 0;

        /*
         * In overwrite mode, AUX data stores do not depend on aux_tail,
         * therefore the (A) control dependency barrier does not exist.
         * The (B) <-> (C) ordering is still observed by the pmu driver.
         */
        if (!rb->aux_overwrite) {
                aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
                handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
                if (aux_head - aux_tail < perf_aux_size(rb))
                        handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

                /*
                 * The handle->size computation depends on the aux_tail
                 * load; this forms a control dependency barrier separating
                 * the aux_tail load from the AUX data stores that are
                 * enabled on a successful return.
                 */
                if (!handle->size) { /* A, matches D */
                        event->pending_disable = 1;
                        perf_output_wakeup(handle);
                        local_set(&rb->aux_nest, 0);
                        goto err_put;
                }
        }

        return handle->rb->aux_priv;

err_put:
        rb_free_aux(rb);

err:
        ring_buffer_put_async(rb);
        handle->event = NULL;

        return NULL;
}
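
/*
 * A minimal sketch (not compiled) of how a hypothetical PMU driver brackets
 * a hardware capture with the begin/end calls above; my_pmu_start_trace()
 * and my_pmu_stop_trace() are placeholders for driver-specific operations.
 */
#if 0
        struct perf_output_handle handle;
        unsigned long bytes;
        void *buf;

        buf = perf_aux_output_begin(&handle, event);
        if (!buf)
                return;         /* no AUX buffer, no space, or nested writer */

        /* hardware may write up to handle.size bytes starting at handle.head */
        my_pmu_start_trace(buf, handle.head, handle.size);
        /* ... capture runs; the driver orders the hardware stores (B) ... */
        bytes = my_pmu_stop_trace();

        perf_aux_output_end(&handle, bytes, false);     /* commit the data */
#endif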

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
                         bool truncated)
{
        struct ring_buffer *rb = handle->rb;
        bool wakeup = truncated;
        unsigned long aux_head;
        u64 flags = 0;

        if (truncated)
                flags |= PERF_AUX_FLAG_TRUNCATED;

        /* in overwrite mode, driver provides aux_head via handle */
        if (rb->aux_overwrite) {
                flags |= PERF_AUX_FLAG_OVERWRITE;

                aux_head = handle->head;
                local_set(&rb->aux_head, aux_head);
        } else {
                aux_head = local_read(&rb->aux_head);
                local_add(size, &rb->aux_head);
        }

        if (size || flags) {
                /*
                 * Only send RECORD_AUX if we have something useful to communicate
                 */

                perf_event_aux_event(handle->event, aux_head, size, flags);
        }

        aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

        if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
                wakeup = true;
                local_add(rb->aux_watermark, &rb->aux_wakeup);
        }

        if (wakeup) {
                if (truncated)
                        handle->event->pending_disable = 1;
                perf_output_wakeup(handle);
        }

        handle->event = NULL;

        local_set(&rb->aux_nest, 0);
        rb_free_aux(rb);
        ring_buffer_put_async(rb);
}

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long aux_head;

        if (size > handle->size)
                return -ENOSPC;

        local_add(size, &rb->aux_head);

        aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
        if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
                perf_output_wakeup(handle);
                local_add(rb->aux_watermark, &rb->aux_wakeup);
                handle->wakeup = local_read(&rb->aux_wakeup) +
                                 rb->aux_watermark;
        }

        handle->head = aux_head;
        handle->size -= size;

        return 0;
}

void *perf_get_aux(struct perf_output_handle *handle)
{
        /* this is only valid between perf_aux_output_begin and *_end */
        if (!handle->event)
                return NULL;

        return handle->rb->aux_priv;
}

#define PERF_AUX_GFP    (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
        struct page *page;

        if (order > MAX_ORDER)
                order = MAX_ORDER;

        do {
                page = alloc_pages_node(node, PERF_AUX_GFP, order);
        } while (!page && order--);

        if (page && order) {
                /*
                 * Communicate the allocation size to the driver:
                 * if we managed to secure a high-order allocation,
                 * set its first page's private to this order;
                 * !PagePrivate(page) means it's just a normal page.
                 */
                split_page(page, order);
                SetPagePrivate(page);
                set_page_private(page, order);
        }

        return page;
}
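
/*
 * A minimal sketch (not compiled) of how a consumer of these pages can
 * recover the allocation order recorded above; this only restates the
 * convention, it is not an API exported by this file.
 */
#if 0
        struct page *page = virt_to_page(rb->aux_pages[idx]);
        int order = 0;

        if (PagePrivate(page))
                order = page_private(page);
        /* pages [idx, idx + (1 << order)) are physically contiguous */
#endif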

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
        struct page *page = virt_to_page(rb->aux_pages[idx]);

        ClearPagePrivate(page);
        page->mapping = NULL;
        __free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
        int pg;

        if (rb->aux_priv) {
                rb->free_aux(rb->aux_priv);
                rb->free_aux = NULL;
                rb->aux_priv = NULL;
        }

        if (rb->aux_nr_pages) {
                for (pg = 0; pg < rb->aux_nr_pages; pg++)
                        rb_free_aux_page(rb, pg);

                kfree(rb->aux_pages);
                rb->aux_nr_pages = 0;
        }
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
        bool overwrite = !(flags & RING_BUFFER_WRITABLE);
        int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
        int ret = -ENOMEM, max_order = 0;

        if (!has_aux(event))
                return -ENOTSUPP;

        if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
                /*
                 * We need to start with the max_order that fits in nr_pages,
                 * not the other way around, hence ilog2() and not get_order()
                 * (see the worked example after this function).
                 */
                max_order = ilog2(nr_pages);

                /*
                 * The PMU requests more than one contiguous chunk of memory
                 * for SW double buffering.
                 */
                if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
                    !overwrite) {
                        if (!max_order)
                                return -EINVAL;

                        max_order--;
                }
        }

        rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
        if (!rb->aux_pages)
                return -ENOMEM;

        rb->free_aux = event->pmu->free_aux;
        for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
                struct page *page;
                int last, order;

                order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
                page = rb_alloc_aux_page(node, order);
                if (!page)
                        goto out;

                for (last = rb->aux_nr_pages + (1 << page_private(page));
                     last > rb->aux_nr_pages; rb->aux_nr_pages++)
                        rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
        }

        /*
         * In overwrite mode, PMUs that don't support SG may not handle more
         * than one contiguous allocation, since they rely on PMI to do double
         * buffering. In this case, the entire buffer has to be one contiguous
         * chunk.
         */
        if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
            overwrite) {
                struct page *page = virt_to_page(rb->aux_pages[0]);

                if (page_private(page) != max_order)
                        goto out;
        }

        rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
                                             overwrite);
        if (!rb->aux_priv)
                goto out;

        ret = 0;

        /*
         * aux_pages (and pmu driver's private data, aux_priv) will be
         * referenced in both producer's and consumer's contexts, thus
         * we keep a refcount here to make sure either of the two can
         * reference them safely.
         */
        atomic_set(&rb->aux_refcount, 1);

        rb->aux_overwrite = overwrite;
        rb->aux_watermark = watermark;

        if (!rb->aux_watermark && !rb->aux_overwrite)
                rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
        if (!ret)
                rb->aux_pgoff = pgoff;
        else
                __rb_free_aux(rb);

        return ret;
}
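
/*
 * Worked example for the ilog2()-vs-get_order() distinction noted in
 * rb_alloc_aux() above: for nr_pages == 48, get_order(48 * PAGE_SIZE) is 6
 * (the smallest order that *covers* 48 pages, i.e. 64), while ilog2(48) is 5
 * (the largest order that *fits*, i.e. 32). Starting at get_order() would
 * make the first allocation overshoot nr_pages.
 */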

void rb_free_aux(struct ring_buffer *rb)
{
        if (atomic_dec_and_test(&rb->aux_refcount))
                irq_work_queue(&rb->irq_work);
}

static void rb_irq_work(struct irq_work *work)
{
        struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);

        if (!atomic_read(&rb->aux_refcount))
                __rb_free_aux(rb);

        if (rb->rcu_head.next == (void *)rb)
                call_rcu(&rb->rcu_head, rb_free_rcu);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (pgoff > rb->nr_pages)
                return NULL;

        if (pgoff == 0)
                return virt_to_page(rb->user_page);

        return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
        struct page *page;
        int node;

        node = (cpu == -1) ? cpu : cpu_to_node(cpu);
        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
        if (!page)
                return NULL;

        return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        int i;

        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        rb->user_page = perf_mmap_alloc_page(cpu);
        if (!rb->user_page)
                goto fail_user_page;

        for (i = 0; i < nr_pages; i++) {
                rb->data_pages[i] = perf_mmap_alloc_page(cpu);
                if (!rb->data_pages[i])
                        goto fail_data_pages;
        }

        rb->nr_pages = nr_pages;

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_data_pages:
        for (i--; i >= 0; i--)
                free_page((unsigned long)rb->data_pages[i]);

        free_page((unsigned long)rb->user_page);

fail_user_page:
        kfree(rb);

fail:
        return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
        struct page *page = virt_to_page((void *)addr);

        page->mapping = NULL;
        __free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
        int i;

        perf_mmap_free_page((unsigned long)rb->user_page);
        for (i = 0; i < rb->nr_pages; i++)
                perf_mmap_free_page((unsigned long)rb->data_pages[i]);
        kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
        return rb->nr_pages << page_order(rb);
}
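
/*
 * Example (see rb_alloc() below): a 32-page request in this path is carved
 * out of a single vmalloc area, so rb->nr_pages stays 1 while rb->page_order
 * becomes ilog2(32) == 5; data_page_nr() then still reports 32 data pages.
 */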

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        /* The '>' counts in the user page. */
        if (pgoff > data_page_nr(rb))
                return NULL;

        return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
        struct page *page = vmalloc_to_page(addr);

        page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
        struct ring_buffer *rb;
        void *base;
        int i, nr;

        rb = container_of(work, struct ring_buffer, work);
        nr = data_page_nr(rb);

        base = rb->user_page;
        /* The '<=' counts in the user page. */
        for (i = 0; i <= nr; i++)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));

        vfree(base);
        kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
        schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        void *all_buf;

        size = sizeof(struct ring_buffer);
        size += sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        INIT_WORK(&rb->work, rb_free_work);

        all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
        if (!all_buf)
                goto fail_all_buf;

        rb->user_page = all_buf;
        rb->data_pages[0] = all_buf + PAGE_SIZE;
        if (nr_pages) {
                rb->nr_pages = 1;
                rb->page_order = ilog2(nr_pages);
        }

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_all_buf:
        kfree(rb);

fail:
        return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (rb->aux_nr_pages) {
                /* above AUX space */
                if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
                        return NULL;

                /* AUX space */
                if (pgoff >= rb->aux_pgoff)
                        return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
        }

        return __perf_mmap_to_page(rb, pgoff);
}
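
/*
 * A sketch of the resulting mmap page layout, assuming both a data buffer
 * and an AUX area have been set up (aux_pgoff is chosen by the caller of
 * rb_alloc_aux()):
 *
 *   pgoff 0                                user_page (perf_event_mmap_page)
 *   pgoff 1 .. data pages                  data_pages[] (sampling buffer)
 *   pgoff aux_pgoff ..
 *         aux_pgoff + aux_nr_pages - 1     aux_pages[] (AUX area)
 */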
