TOMOYO Linux Cross Reference
Linux/kernel/trace/ring_buffer.c

  1 /*
  2  * Generic ring buffer
  3  *
  4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  5  */
  6 #include <linux/ring_buffer.h>
  7 #include <linux/trace_clock.h>
  8 #include <linux/spinlock.h>
  9 #include <linux/debugfs.h>
 10 #include <linux/uaccess.h>
 11 #include <linux/hardirq.h>
 12 #include <linux/kmemcheck.h>
 13 #include <linux/module.h>
 14 #include <linux/percpu.h>
 15 #include <linux/mutex.h>
 16 #include <linux/slab.h>
 17 #include <linux/init.h>
 18 #include <linux/hash.h>
 19 #include <linux/list.h>
 20 #include <linux/cpu.h>
 21 #include <linux/fs.h>
 22 
 23 #include <asm/local.h>
 24 #include "trace.h"
 25 
 26 /*
  27  * The ring buffer header is special. We must keep it up to date manually.
 28  */
 29 int ring_buffer_print_entry_header(struct trace_seq *s)
 30 {
 31         int ret;
 32 
 33         ret = trace_seq_printf(s, "# compressed entry header\n");
 34         ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
 35         ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
 36         ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
 37         ret = trace_seq_printf(s, "\n");
 38         ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
 39                                RINGBUF_TYPE_PADDING);
 40         ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
 41                                RINGBUF_TYPE_TIME_EXTEND);
 42         ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
 43                                RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 44 
 45         return ret;
 46 }
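
/*
 * Illustrative sketch (not part of the original file): the compressed
 * header printed above corresponds roughly to the layout below. The
 * struct name is made up for illustration only; the authoritative
 * definition of struct ring_buffer_event lives in <linux/ring_buffer.h>.
 */
struct example_entry_header {
        u32     type_len:5;     /* payload size in 4-byte words, or a RINGBUF_TYPE_* code */
        u32     time_delta:27;  /* delta from the previous event's time stamp */
        u32     array[];        /* payload length (when type_len == 0) and/or payload data */
};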
 47 
 48 /*
 49  * The ring buffer is made up of a list of pages. A separate list of pages is
 50  * allocated for each CPU. A writer may only write to a buffer that is
 51  * associated with the CPU it is currently executing on.  A reader may read
 52  * from any per cpu buffer.
 53  *
 54  * The reader is special. For each per cpu buffer, the reader has its own
 55  * reader page. When a reader has read the entire reader page, this reader
 56  * page is swapped with another page in the ring buffer.
 57  *
 58  * Now, as long as the writer is off the reader page, the reader can do what
 59  * ever it wants with that page. The writer will never write to that page
 60  * again (as long as it is out of the ring buffer).
 61  *
 62  * Here's some silly ASCII art.
 63  *
 64  *   +------+
 65  *   |reader|          RING BUFFER
 66  *   |page  |
 67  *   +------+        +---+   +---+   +---+
 68  *                   |   |-->|   |-->|   |
 69  *                   +---+   +---+   +---+
 70  *                     ^               |
 71  *                     |               |
 72  *                     +---------------+
 73  *
 74  *
 75  *   +------+
 76  *   |reader|          RING BUFFER
 77  *   |page  |------------------v
 78  *   +------+        +---+   +---+   +---+
 79  *                   |   |-->|   |-->|   |
 80  *                   +---+   +---+   +---+
 81  *                     ^               |
 82  *                     |               |
 83  *                     +---------------+
 84  *
 85  *
 86  *   +------+
 87  *   |reader|          RING BUFFER
 88  *   |page  |------------------v
 89  *   +------+        +---+   +---+   +---+
 90  *      ^            |   |-->|   |-->|   |
 91  *      |            +---+   +---+   +---+
 92  *      |                              |
 93  *      |                              |
 94  *      +------------------------------+
 95  *
 96  *
 97  *   +------+
 98  *   |buffer|          RING BUFFER
 99  *   |page  |------------------v
100  *   +------+        +---+   +---+   +---+
101  *      ^            |   |   |   |-->|   |
102  *      |   New      +---+   +---+   +---+
103  *      |  Reader------^               |
104  *      |   page                       |
105  *      +------------------------------+
106  *
107  *
108  * After we make this swap, the reader can hand this page off to the splice
109  * code and be done with it. It can even allocate a new page if it needs to
110  * and swap that into the ring buffer.
111  *
112  * We will be using cmpxchg soon to make all this lockless.
113  *
114  */
115 
116 /*
117  * A fast way to enable or disable all ring buffers is to
 118  * call tracing_on or tracing_off. Turning the switch off
 119  * prevents any ring buffer from being recorded to.
 120  * Turning it on makes it OK to write to the ring buffer,
 121  * provided the ring buffer itself is enabled.
122  *
 123  * There are three layers that must be on in order to write
124  * to the ring buffer.
125  *
126  * 1) This global flag must be set.
127  * 2) The ring buffer must be enabled for recording.
128  * 3) The per cpu buffer must be enabled for recording.
129  *
130  * In case of an anomaly, this global flag has a bit set that
 131  * will permanently disable all ring buffers.
132  */
133 
134 /*
135  * Global flag to disable all recording to ring buffers
136  *  This has two bits: ON, DISABLED
137  *
138  *  ON   DISABLED
139  * ---- ----------
140  *   0      0        : ring buffers are off
141  *   1      0        : ring buffers are on
142  *   X      1        : ring buffers are permanently disabled
143  */
144 
145 enum {
146         RB_BUFFERS_ON_BIT       = 0,
147         RB_BUFFERS_DISABLED_BIT = 1,
148 };
149 
150 enum {
151         RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
152         RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
153 };
154 
155 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
156 
157 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
158 
159 /**
160  * tracing_on - enable all tracing buffers
161  *
162  * This function enables all tracing buffers that may have been
163  * disabled with tracing_off.
164  */
165 void tracing_on(void)
166 {
167         set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
168 }
169 EXPORT_SYMBOL_GPL(tracing_on);
170 
171 /**
172  * tracing_off - turn off all tracing buffers
173  *
174  * This function stops all tracing buffers from recording data.
175  * It does not disable any overhead the tracers themselves may
176  * be causing. This function simply causes all recording to
177  * the ring buffers to fail.
178  */
179 void tracing_off(void)
180 {
181         clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
182 }
183 EXPORT_SYMBOL_GPL(tracing_off);
184 
185 /**
186  * tracing_off_permanent - permanently disable ring buffers
187  *
188  * This function, once called, will disable all ring buffers
189  * permanently.
190  */
191 void tracing_off_permanent(void)
192 {
193         set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
194 }
195 
196 /**
 197  * tracing_is_on - show the current on/off state of the ring buffers
198  */
199 int tracing_is_on(void)
200 {
201         return ring_buffer_flags == RB_BUFFERS_ON;
202 }
203 EXPORT_SYMBOL_GPL(tracing_is_on);
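
/*
 * Illustrative sketch (not part of the original file): bracketing a code
 * region so nothing is recorded while it runs. Note that the global switch
 * is only the first of the three layers described above; the ring buffer
 * and the per-cpu buffer must also be enabled for a write to succeed.
 */
static inline void example_run_untraced(void (*fn)(void))
{
        int was_on = tracing_is_on();

        tracing_off();          /* layer 1: flip the global switch off */
        fn();                   /* work that should not be recorded */
        if (was_on)
                tracing_on();   /* restore the global switch */
}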
204 
205 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
206 #define RB_ALIGNMENT            4U
207 #define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
208 #define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */
209 
210 #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
211 # define RB_FORCE_8BYTE_ALIGNMENT       0
212 # define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
213 #else
214 # define RB_FORCE_8BYTE_ALIGNMENT       1
215 # define RB_ARCH_ALIGNMENT              8U
216 #endif
217 
218 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
219 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
220 
221 enum {
222         RB_LEN_TIME_EXTEND = 8,
223         RB_LEN_TIME_STAMP = 16,
224 };
225 
226 #define skip_time_extend(event) \
227         ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
228 
229 static inline int rb_null_event(struct ring_buffer_event *event)
230 {
231         return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
232 }
233 
234 static void rb_event_set_padding(struct ring_buffer_event *event)
235 {
236         /* padding has a NULL time_delta */
237         event->type_len = RINGBUF_TYPE_PADDING;
238         event->time_delta = 0;
239 }
240 
241 static unsigned
242 rb_event_data_length(struct ring_buffer_event *event)
243 {
244         unsigned length;
245 
246         if (event->type_len)
247                 length = event->type_len * RB_ALIGNMENT;
248         else
249                 length = event->array[0];
250         return length + RB_EVNT_HDR_SIZE;
251 }
252 
253 /*
254  * Return the length of the given event. Will return
255  * the length of the time extend if the event is a
256  * time extend.
257  */
258 static inline unsigned
259 rb_event_length(struct ring_buffer_event *event)
260 {
261         switch (event->type_len) {
262         case RINGBUF_TYPE_PADDING:
263                 if (rb_null_event(event))
264                         /* undefined */
265                         return -1;
266                 return  event->array[0] + RB_EVNT_HDR_SIZE;
267 
268         case RINGBUF_TYPE_TIME_EXTEND:
269                 return RB_LEN_TIME_EXTEND;
270 
271         case RINGBUF_TYPE_TIME_STAMP:
272                 return RB_LEN_TIME_STAMP;
273 
274         case RINGBUF_TYPE_DATA:
275                 return rb_event_data_length(event);
276         default:
277                 BUG();
278         }
279         /* not hit */
280         return 0;
281 }
282 
283 /*
284  * Return total length of time extend and data,
285  *   or just the event length for all other events.
286  */
287 static inline unsigned
288 rb_event_ts_length(struct ring_buffer_event *event)
289 {
290         unsigned len = 0;
291 
292         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
293                 /* time extends include the data event after it */
294                 len = RB_LEN_TIME_EXTEND;
295                 event = skip_time_extend(event);
296         }
297         return len + rb_event_length(event);
298 }
299 
300 /**
301  * ring_buffer_event_length - return the length of the event
302  * @event: the event to get the length of
303  *
 304  * Returns the size of the data payload of a data event.
 305  * If the event is something other than a data event, it
 306  * returns the size of the event itself, with the exception
 307  * of a TIME EXTEND, where it still returns the size of the
 308  * data payload of the data event that follows it.
309  */
310 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
311 {
312         unsigned length;
313 
314         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
315                 event = skip_time_extend(event);
316 
317         length = rb_event_length(event);
318         if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
319                 return length;
320         length -= RB_EVNT_HDR_SIZE;
321         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
322                 length -= sizeof(event->array[0]);
323         return length;
324 }
325 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
326 
327 /* inline for ring buffer fast paths */
328 static void *
329 rb_event_data(struct ring_buffer_event *event)
330 {
331         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
332                 event = skip_time_extend(event);
333         BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
334         /* If length is in len field, then array[0] has the data */
335         if (event->type_len)
336                 return (void *)&event->array[0];
337         /* Otherwise length is in array[0] and array[1] has the data */
338         return (void *)&event->array[1];
339 }
340 
341 /**
342  * ring_buffer_event_data - return the data of the event
343  * @event: the event to get the data from
344  */
345 void *ring_buffer_event_data(struct ring_buffer_event *event)
346 {
347         return rb_event_data(event);
348 }
349 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
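
/*
 * Illustrative sketch (not part of the original file): how a consumer
 * would typically use the two exports above together. "handle_payload"
 * is a hypothetical callback, not a kernel API.
 */
static void example_consume_event(struct ring_buffer_event *event,
                                  void (*handle_payload)(void *data, unsigned len))
{
        void *data = ring_buffer_event_data(event);     /* start of the payload */
        unsigned len = ring_buffer_event_length(event);  /* size of the payload */

        handle_payload(data, len);
}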
350 
351 #define for_each_buffer_cpu(buffer, cpu)                \
352         for_each_cpu(cpu, buffer->cpumask)
353 
354 #define TS_SHIFT        27
355 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
356 #define TS_DELTA_TEST   (~TS_MASK)
357 
358 /* Flag when events were overwritten */
359 #define RB_MISSED_EVENTS        (1 << 31)
360 /* Missed count stored at end */
361 #define RB_MISSED_STORED        (1 << 30)
362 
363 struct buffer_data_page {
364         u64              time_stamp;    /* page time stamp */
365         local_t          commit;        /* write committed index */
366         unsigned char    data[];        /* data of buffer page */
367 };
368 
369 /*
 370  * Note, the buffer_page list must be first. The buffer pages
 371  * are allocated cache-line aligned, which means that each buffer
 372  * page will start at the beginning of a cache line, and thus
 373  * the least significant bits of its address will be zero. We use this to
374  * add flags in the list struct pointers, to make the ring buffer
375  * lockless.
376  */
377 struct buffer_page {
378         struct list_head list;          /* list of buffer pages */
379         local_t          write;         /* index for next write */
380         unsigned         read;          /* index for next read */
381         local_t          entries;       /* entries on this page */
382         unsigned long    real_end;      /* real end of data */
383         struct buffer_data_page *page;  /* Actual data page */
384 };
385 
386 /*
387  * The buffer page counters, write and entries, must be reset
388  * atomically when crossing page boundaries. To synchronize this
 389  * update, two counters are packed into one word. One is
 390  * the actual counter for the write position or count on the page.
 391  *
 392  * The other is a count of updaters. Before an update happens,
 393  * the updater portion of the word is incremented. This
 394  * allows the update to be done atomically.
395  *
396  * The counter is 20 bits, and the state data is 12.
397  */
398 #define RB_WRITE_MASK           0xfffff
399 #define RB_WRITE_INTCNT         (1 << 20)
400 
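/*
 * Illustrative sketch (not part of the original file): how the packed
 * write field splits into the two counters described above. The helper
 * names are made up for illustration.
 */
static inline unsigned long example_write_position(unsigned long write_field)
{
        return write_field & RB_WRITE_MASK;     /* low 20 bits: write position */
}

static inline unsigned long example_updater_count(unsigned long write_field)
{
        return write_field >> 20;       /* upper bits: nested updaters (RB_WRITE_INTCNT increments) */
}
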
401 static void rb_init_page(struct buffer_data_page *bpage)
402 {
403         local_set(&bpage->commit, 0);
404 }
405 
406 /**
407  * ring_buffer_page_len - the size of data on the page.
408  * @page: The page to read
409  *
410  * Returns the amount of data on the page, including buffer page header.
411  */
412 size_t ring_buffer_page_len(void *page)
413 {
414         return local_read(&((struct buffer_data_page *)page)->commit)
415                 + BUF_PAGE_HDR_SIZE;
416 }
417 
418 /*
419  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
420  * this issue out.
421  */
422 static void free_buffer_page(struct buffer_page *bpage)
423 {
424         free_page((unsigned long)bpage->page);
425         kfree(bpage);
426 }
427 
428 /*
429  * We need to fit the time_stamp delta into 27 bits.
430  */
431 static inline int test_time_stamp(u64 delta)
432 {
433         if (delta & TS_DELTA_TEST)
434                 return 1;
435         return 0;
436 }
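
/*
 * Illustrative sketch (not part of the original file): a delta that fails
 * test_time_stamp() does not fit in the 27-bit time_delta field and has to
 * be split across a time-extend event, as rb_add_time_stamp() does further
 * down in this file. The helper below is made up for illustration.
 */
static inline void example_split_delta(u64 delta, u32 *low27, u32 *rest)
{
        if (test_time_stamp(delta)) {
                *low27 = delta & TS_MASK;       /* bottom 27 bits -> event->time_delta */
                *rest  = delta >> TS_SHIFT;     /* the remainder  -> event->array[0] */
        } else {
                *low27 = delta;                 /* fits directly in the event header */
                *rest  = 0;
        }
}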
437 
438 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
439 
440 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
441 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
442 
443 int ring_buffer_print_page_header(struct trace_seq *s)
444 {
445         struct buffer_data_page field;
446         int ret;
447 
448         ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
449                                "offset:0;\tsize:%u;\tsigned:%u;\n",
450                                (unsigned int)sizeof(field.time_stamp),
451                                (unsigned int)is_signed_type(u64));
452 
453         ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
454                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
455                                (unsigned int)offsetof(typeof(field), commit),
456                                (unsigned int)sizeof(field.commit),
457                                (unsigned int)is_signed_type(long));
458 
459         ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
460                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
461                                (unsigned int)offsetof(typeof(field), commit),
462                                1,
463                                (unsigned int)is_signed_type(long));
464 
465         ret = trace_seq_printf(s, "\tfield: char data;\t"
466                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
467                                (unsigned int)offsetof(typeof(field), data),
468                                (unsigned int)BUF_PAGE_SIZE,
469                                (unsigned int)is_signed_type(char));
470 
471         return ret;
472 }
473 
474 /*
 475  * If head_page == tail_page && head == tail, then the buffer is empty.
476  */
477 struct ring_buffer_per_cpu {
478         int                             cpu;
479         atomic_t                        record_disabled;
480         struct ring_buffer              *buffer;
481         spinlock_t                      reader_lock;    /* serialize readers */
482         arch_spinlock_t                 lock;
483         struct lock_class_key           lock_key;
484         struct list_head                *pages;
485         struct buffer_page              *head_page;     /* read from head */
486         struct buffer_page              *tail_page;     /* write to tail */
487         struct buffer_page              *commit_page;   /* committed pages */
488         struct buffer_page              *reader_page;
489         unsigned long                   lost_events;
490         unsigned long                   last_overrun;
491         local_t                         commit_overrun;
492         local_t                         overrun;
493         local_t                         entries;
494         local_t                         committing;
495         local_t                         commits;
496         unsigned long                   read;
497         u64                             write_stamp;
498         u64                             read_stamp;
499 };
500 
501 struct ring_buffer {
502         unsigned                        pages;
503         unsigned                        flags;
504         int                             cpus;
505         atomic_t                        record_disabled;
506         cpumask_var_t                   cpumask;
507 
508         struct lock_class_key           *reader_lock_key;
509 
510         struct mutex                    mutex;
511 
512         struct ring_buffer_per_cpu      **buffers;
513 
514 #ifdef CONFIG_HOTPLUG_CPU
515         struct notifier_block           cpu_notify;
516 #endif
517         u64                             (*clock)(void);
518 };
519 
520 struct ring_buffer_iter {
521         struct ring_buffer_per_cpu      *cpu_buffer;
522         unsigned long                   head;
523         struct buffer_page              *head_page;
524         struct buffer_page              *cache_reader_page;
525         unsigned long                   cache_read;
526         u64                             read_stamp;
527 };
528 
529 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
530 #define RB_WARN_ON(b, cond)                                             \
531         ({                                                              \
532                 int _____ret = unlikely(cond);                          \
533                 if (_____ret) {                                         \
534                         if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
535                                 struct ring_buffer_per_cpu *__b =       \
536                                         (void *)b;                      \
537                                 atomic_inc(&__b->buffer->record_disabled); \
538                         } else                                          \
539                                 atomic_inc(&b->record_disabled);        \
540                         WARN_ON(1);                                     \
541                 }                                                       \
542                 _____ret;                                               \
543         })
544 
545 /* Up this if you want to test the TIME_EXTENTS and normalization */
546 #define DEBUG_SHIFT 0
547 
548 static inline u64 rb_time_stamp(struct ring_buffer *buffer)
549 {
550         /* shift to debug/test normalization and TIME_EXTENTS */
551         return buffer->clock() << DEBUG_SHIFT;
552 }
553 
554 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
555 {
556         u64 time;
557 
558         preempt_disable_notrace();
559         time = rb_time_stamp(buffer);
560         preempt_enable_no_resched_notrace();
561 
562         return time;
563 }
564 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
565 
566 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
567                                       int cpu, u64 *ts)
568 {
569         /* Just stupid testing the normalize function and deltas */
570         *ts >>= DEBUG_SHIFT;
571 }
572 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
573 
574 /*
575  * Making the ring buffer lockless makes things tricky.
 576  * Writes only happen on the CPU that they are on, and they
 577  * only need to worry about interrupts. Reads, however, can
 578  * happen on any CPU.
579  *
580  * The reader page is always off the ring buffer, but when the
581  * reader finishes with a page, it needs to swap its page with
582  * a new one from the buffer. The reader needs to take from
583  * the head (writes go to the tail). But if a writer is in overwrite
584  * mode and wraps, it must push the head page forward.
585  *
586  * Here lies the problem.
587  *
588  * The reader must be careful to replace only the head page, and
589  * not another one. As described at the top of the file in the
590  * ASCII art, the reader sets its old page to point to the next
591  * page after head. It then sets the page after head to point to
592  * the old reader page. But if the writer moves the head page
593  * during this operation, the reader could end up with the tail.
594  *
595  * We use cmpxchg to help prevent this race. We also do something
596  * special with the page before head. We set the LSB to 1.
597  *
598  * When the writer must push the page forward, it will clear the
599  * bit that points to the head page, move the head, and then set
600  * the bit that points to the new head page.
601  *
 602  * We also don't want an interrupt coming in and moving the head
 603  * page out from under another writer. We use the second LSB to
 604  * catch that case too. Thus:
605  *
606  * head->list->prev->next        bit 1          bit 0
607  *                              -------        -------
608  * Normal page                     0              0
609  * Points to head page             0              1
610  * New head page                   1              0
611  *
612  * Note we can not trust the prev pointer of the head page, because:
613  *
614  * +----+       +-----+        +-----+
615  * |    |------>|  T  |---X--->|  N  |
616  * |    |<------|     |        |     |
617  * +----+       +-----+        +-----+
618  *   ^                           ^ |
619  *   |          +-----+          | |
620  *   +----------|  R  |----------+ |
621  *              |     |<-----------+
622  *              +-----+
623  *
624  * Key:  ---X-->  HEAD flag set in pointer
625  *         T      Tail page
626  *         R      Reader page
627  *         N      Next page
628  *
629  * (see __rb_reserve_next() to see where this happens)
630  *
631  *  What the above shows is that the reader just swapped out
632  *  the reader page with a page in the buffer, but before it
 633  *  could make the new head page point back to the new page added
634  *  it was preempted by a writer. The writer moved forward onto
635  *  the new page added by the reader and is about to move forward
636  *  again.
637  *
 638  *  You can see that it is legitimate for the previous pointer of
 639  *  the head (or any page) not to point back to itself, but only
 640  *  temporarily.
641  */
642 
643 #define RB_PAGE_NORMAL          0UL
644 #define RB_PAGE_HEAD            1UL
645 #define RB_PAGE_UPDATE          2UL
646 
647 
648 #define RB_FLAG_MASK            3UL
649 
650 /* PAGE_MOVED is not part of the mask */
651 #define RB_PAGE_MOVED           4UL
652 
653 /*
654  * rb_list_head - remove any bit
655  */
656 static struct list_head *rb_list_head(struct list_head *list)
657 {
658         unsigned long val = (unsigned long)list;
659 
660         return (struct list_head *)(val & ~RB_FLAG_MASK);
661 }
662 
663 /*
664  * rb_is_head_page - test if the given page is the head page
665  *
666  * Because the reader may move the head_page pointer, we can
 667  * Because the reader may move the head_page pointer, we cannot
 668  * trust what the head page is (it may be pointing to
 669  * the reader page). But if the next page is a head page,
 670  * its flags will be non-zero.
671 static inline int
672 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
673                 struct buffer_page *page, struct list_head *list)
674 {
675         unsigned long val;
676 
677         val = (unsigned long)list->next;
678 
679         if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
680                 return RB_PAGE_MOVED;
681 
682         return val & RB_FLAG_MASK;
683 }
684 
685 /*
686  * rb_is_reader_page
687  *
 688  * The unique thing about the reader page is that, if the
689  * writer is ever on it, the previous pointer never points
690  * back to the reader page.
691  */
692 static int rb_is_reader_page(struct buffer_page *page)
693 {
694         struct list_head *list = page->list.prev;
695 
696         return rb_list_head(list->next) != &page->list;
697 }
698 
699 /*
700  * rb_set_list_to_head - set a list_head to be pointing to head.
701  */
702 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
703                                 struct list_head *list)
704 {
705         unsigned long *ptr;
706 
707         ptr = (unsigned long *)&list->next;
708         *ptr |= RB_PAGE_HEAD;
709         *ptr &= ~RB_PAGE_UPDATE;
710 }
711 
712 /*
713  * rb_head_page_activate - sets up head page
714  */
715 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
716 {
717         struct buffer_page *head;
718 
719         head = cpu_buffer->head_page;
720         if (!head)
721                 return;
722 
723         /*
724          * Set the previous list pointer to have the HEAD flag.
725          */
726         rb_set_list_to_head(cpu_buffer, head->list.prev);
727 }
728 
729 static void rb_list_head_clear(struct list_head *list)
730 {
731         unsigned long *ptr = (unsigned long *)&list->next;
732 
733         *ptr &= ~RB_FLAG_MASK;
734 }
735 
736 /*
 737  * rb_head_page_deactivate - clears head page ptr (for free list)
738  */
739 static void
740 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
741 {
742         struct list_head *hd;
743 
744         /* Go through the whole list and clear any pointers found. */
745         rb_list_head_clear(cpu_buffer->pages);
746 
747         list_for_each(hd, cpu_buffer->pages)
748                 rb_list_head_clear(hd);
749 }
750 
751 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
752                             struct buffer_page *head,
753                             struct buffer_page *prev,
754                             int old_flag, int new_flag)
755 {
756         struct list_head *list;
757         unsigned long val = (unsigned long)&head->list;
758         unsigned long ret;
759 
760         list = &prev->list;
761 
762         val &= ~RB_FLAG_MASK;
763 
764         ret = cmpxchg((unsigned long *)&list->next,
765                       val | old_flag, val | new_flag);
766 
767         /* check if the reader took the page */
768         if ((ret & ~RB_FLAG_MASK) != val)
769                 return RB_PAGE_MOVED;
770 
771         return ret & RB_FLAG_MASK;
772 }
773 
774 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
775                                    struct buffer_page *head,
776                                    struct buffer_page *prev,
777                                    int old_flag)
778 {
779         return rb_head_page_set(cpu_buffer, head, prev,
780                                 old_flag, RB_PAGE_UPDATE);
781 }
782 
783 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
784                                  struct buffer_page *head,
785                                  struct buffer_page *prev,
786                                  int old_flag)
787 {
788         return rb_head_page_set(cpu_buffer, head, prev,
789                                 old_flag, RB_PAGE_HEAD);
790 }
791 
792 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
793                                    struct buffer_page *head,
794                                    struct buffer_page *prev,
795                                    int old_flag)
796 {
797         return rb_head_page_set(cpu_buffer, head, prev,
798                                 old_flag, RB_PAGE_NORMAL);
799 }
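
/*
 * Illustrative sketch (not part of the original file): the order in which a
 * writer flips the pointer flags when it must push the head page forward,
 * as described in the comment block above. This is heavily simplified: it
 * ignores nested writers, reader races and retries, which the real update
 * code has to handle.
 */
static int example_push_head_page(struct ring_buffer_per_cpu *cpu_buffer,
                                  struct buffer_page *prev,
                                  struct buffer_page *head,
                                  struct buffer_page *new_head)
{
        /* 1) Flag the pointer to the current head as being updated */
        if (rb_head_page_set_update(cpu_buffer, head, prev, RB_PAGE_HEAD) !=
            RB_PAGE_HEAD)
                return 0;       /* a reader or interrupt got here first */

        /* 2) Flag the pointer to the next page as the new head */
        rb_head_page_set_head(cpu_buffer, new_head, head, RB_PAGE_NORMAL);

        /* 3) Return the pointer to the old head to normal */
        rb_head_page_set_normal(cpu_buffer, head, prev, RB_PAGE_UPDATE);

        return 1;
}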
800 
801 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
802                                struct buffer_page **bpage)
803 {
804         struct list_head *p = rb_list_head((*bpage)->list.next);
805 
806         *bpage = list_entry(p, struct buffer_page, list);
807 }
808 
809 static struct buffer_page *
810 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
811 {
812         struct buffer_page *head;
813         struct buffer_page *page;
814         struct list_head *list;
815         int i;
816 
817         if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
818                 return NULL;
819 
820         /* sanity check */
821         list = cpu_buffer->pages;
822         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
823                 return NULL;
824 
825         page = head = cpu_buffer->head_page;
826         /*
 827          * It is possible that the writer moves the head page behind
 828          * where we started, and we miss it in one loop.
 829          * A second loop should grab the head page, but we'll do
 830          * three loops just because I'm paranoid.
831          */
832         for (i = 0; i < 3; i++) {
833                 do {
834                         if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
835                                 cpu_buffer->head_page = page;
836                                 return page;
837                         }
838                         rb_inc_page(cpu_buffer, &page);
839                 } while (page != head);
840         }
841 
842         RB_WARN_ON(cpu_buffer, 1);
843 
844         return NULL;
845 }
846 
847 static int rb_head_page_replace(struct buffer_page *old,
848                                 struct buffer_page *new)
849 {
850         unsigned long *ptr = (unsigned long *)&old->list.prev->next;
851         unsigned long val;
852         unsigned long ret;
853 
854         val = *ptr & ~RB_FLAG_MASK;
855         val |= RB_PAGE_HEAD;
856 
857         ret = cmpxchg(ptr, val, (unsigned long)&new->list);
858 
859         return ret == val;
860 }
861 
862 /*
863  * rb_tail_page_update - move the tail page forward
864  *
 865  * Returns 1 if it moved the tail page, 0 if someone else did.
866  */
867 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
868                                struct buffer_page *tail_page,
869                                struct buffer_page *next_page)
870 {
871         struct buffer_page *old_tail;
872         unsigned long old_entries;
873         unsigned long old_write;
874         int ret = 0;
875 
876         /*
877          * The tail page now needs to be moved forward.
878          *
 879          * We need to reset the tail page, but we must not erase
 880          * data brought in by interrupts that have moved the tail
 881          * page and are currently writing to it.
882          *
883          * We add a counter to the write field to denote this.
884          */
885         old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
886         old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
887 
888         /*
889          * Just make sure we have seen our old_write and synchronize
890          * with any interrupts that come in.
891          */
892         barrier();
893 
894         /*
895          * If the tail page is still the same as what we think
896          * it is, then it is up to us to update the tail
897          * pointer.
898          */
899         if (tail_page == cpu_buffer->tail_page) {
900                 /* Zero the write counter */
901                 unsigned long val = old_write & ~RB_WRITE_MASK;
902                 unsigned long eval = old_entries & ~RB_WRITE_MASK;
903 
904                 /*
905                  * This will only succeed if an interrupt did
906                  * not come in and change it. In which case, we
907                  * do not want to modify it.
908                  *
909                  * We add (void) to let the compiler know that we do not care
910                  * about the return value of these functions. We use the
911                  * cmpxchg to only update if an interrupt did not already
912                  * do it for us. If the cmpxchg fails, we don't care.
913                  */
914                 (void)local_cmpxchg(&next_page->write, old_write, val);
915                 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
916 
917                 /*
 918                  * No need to worry about races with clearing out the commit:
 919                  * it can only increment when a commit takes place. But that
 920                  * only happens in the outermost nested commit.
921                  */
922                 local_set(&next_page->page->commit, 0);
923 
924                 old_tail = cmpxchg(&cpu_buffer->tail_page,
925                                    tail_page, next_page);
926 
927                 if (old_tail == tail_page)
928                         ret = 1;
929         }
930 
931         return ret;
932 }
933 
934 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
935                           struct buffer_page *bpage)
936 {
937         unsigned long val = (unsigned long)bpage;
938 
939         if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
940                 return 1;
941 
942         return 0;
943 }
944 
945 /**
 946  * rb_check_list - make sure a pointer to a list has its flag bits cleared
947  */
948 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
949                          struct list_head *list)
950 {
951         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
952                 return 1;
953         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
954                 return 1;
955         return 0;
956 }
957 
958 /**
 959  * rb_check_pages - integrity check of buffer pages
960  * @cpu_buffer: CPU buffer with pages to test
961  *
962  * As a safety measure we check to make sure the data pages have not
963  * been corrupted.
964  */
965 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
966 {
967         struct list_head *head = cpu_buffer->pages;
968         struct buffer_page *bpage, *tmp;
969 
970         rb_head_page_deactivate(cpu_buffer);
971 
972         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
973                 return -1;
974         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
975                 return -1;
976 
977         if (rb_check_list(cpu_buffer, head))
978                 return -1;
979 
980         list_for_each_entry_safe(bpage, tmp, head, list) {
981                 if (RB_WARN_ON(cpu_buffer,
982                                bpage->list.next->prev != &bpage->list))
983                         return -1;
984                 if (RB_WARN_ON(cpu_buffer,
985                                bpage->list.prev->next != &bpage->list))
986                         return -1;
987                 if (rb_check_list(cpu_buffer, &bpage->list))
988                         return -1;
989         }
990 
991         rb_head_page_activate(cpu_buffer);
992 
993         return 0;
994 }
995 
996 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
997                              unsigned nr_pages)
998 {
999         struct buffer_page *bpage, *tmp;
1000         LIST_HEAD(pages);
1001         unsigned i;
1002 
1003         WARN_ON(!nr_pages);
1004 
1005         for (i = 0; i < nr_pages; i++) {
1006                 struct page *page;
1007                 /*
1008                  * The __GFP_NORETRY flag makes sure that the allocation fails
1009                  * gracefully without invoking the OOM killer, so the system is
1010                  * not destabilized.
1011                  */
1012                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1013                                     GFP_KERNEL | __GFP_NORETRY,
1014                                     cpu_to_node(cpu_buffer->cpu));
1015                 if (!bpage)
1016                         goto free_pages;
1017 
1018                 rb_check_bpage(cpu_buffer, bpage);
1019 
1020                 list_add(&bpage->list, &pages);
1021 
1022                 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
1023                                         GFP_KERNEL | __GFP_NORETRY, 0);
1024                 if (!page)
1025                         goto free_pages;
1026                 bpage->page = page_address(page);
1027                 rb_init_page(bpage->page);
1028         }
1029 
1030         /*
1031          * The ring buffer page list is a circular list that does not
1032          * start and end with a list head. All page list items point to
1033          * other pages.
1034          */
1035         cpu_buffer->pages = pages.next;
1036         list_del(&pages);
1037 
1038         rb_check_pages(cpu_buffer);
1039 
1040         return 0;
1041 
1042  free_pages:
1043         list_for_each_entry_safe(bpage, tmp, &pages, list) {
1044                 list_del_init(&bpage->list);
1045                 free_buffer_page(bpage);
1046         }
1047         return -ENOMEM;
1048 }
1049 
1050 static struct ring_buffer_per_cpu *
1051 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
1052 {
1053         struct ring_buffer_per_cpu *cpu_buffer;
1054         struct buffer_page *bpage;
1055         struct page *page;
1056         int ret;
1057 
1058         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1059                                   GFP_KERNEL, cpu_to_node(cpu));
1060         if (!cpu_buffer)
1061                 return NULL;
1062 
1063         cpu_buffer->cpu = cpu;
1064         cpu_buffer->buffer = buffer;
1065         spin_lock_init(&cpu_buffer->reader_lock);
1066         lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1067         cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1068 
1069         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1070                             GFP_KERNEL, cpu_to_node(cpu));
1071         if (!bpage)
1072                 goto fail_free_buffer;
1073 
1074         rb_check_bpage(cpu_buffer, bpage);
1075 
1076         cpu_buffer->reader_page = bpage;
1077         page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1078         if (!page)
1079                 goto fail_free_reader;
1080         bpage->page = page_address(page);
1081         rb_init_page(bpage->page);
1082 
1083         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1084 
1085         ret = rb_allocate_pages(cpu_buffer, buffer->pages);
1086         if (ret < 0)
1087                 goto fail_free_reader;
1088 
1089         cpu_buffer->head_page
1090                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1091         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1092 
1093         rb_head_page_activate(cpu_buffer);
1094 
1095         return cpu_buffer;
1096 
1097  fail_free_reader:
1098         free_buffer_page(cpu_buffer->reader_page);
1099 
1100  fail_free_buffer:
1101         kfree(cpu_buffer);
1102         return NULL;
1103 }
1104 
1105 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1106 {
1107         struct list_head *head = cpu_buffer->pages;
1108         struct buffer_page *bpage, *tmp;
1109 
1110         free_buffer_page(cpu_buffer->reader_page);
1111 
1112         rb_head_page_deactivate(cpu_buffer);
1113 
1114         if (head) {
1115                 list_for_each_entry_safe(bpage, tmp, head, list) {
1116                         list_del_init(&bpage->list);
1117                         free_buffer_page(bpage);
1118                 }
1119                 bpage = list_entry(head, struct buffer_page, list);
1120                 free_buffer_page(bpage);
1121         }
1122 
1123         kfree(cpu_buffer);
1124 }
1125 
1126 #ifdef CONFIG_HOTPLUG_CPU
1127 static int rb_cpu_notify(struct notifier_block *self,
1128                          unsigned long action, void *hcpu);
1129 #endif
1130 
1131 /**
1132  * ring_buffer_alloc - allocate a new ring_buffer
1133  * @size: the size in bytes per cpu that is needed.
1134  * @flags: attributes to set for the ring buffer.
1135  *
1136  * Currently the only flag that is available is the RB_FL_OVERWRITE
1137  * flag. This flag means that the buffer will overwrite old data
1138  * when the buffer wraps. If this flag is not set, the buffer will
1139  * drop data when the tail hits the head.
1140  */
1141 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1142                                         struct lock_class_key *key)
1143 {
1144         struct ring_buffer *buffer;
1145         int bsize;
1146         int cpu;
1147 
1148         /* keep it in its own cache line */
1149         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1150                          GFP_KERNEL);
1151         if (!buffer)
1152                 return NULL;
1153 
1154         if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1155                 goto fail_free_buffer;
1156 
1157         buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1158         buffer->flags = flags;
1159         buffer->clock = trace_clock_local;
1160         buffer->reader_lock_key = key;
1161 
1162         /* need at least two pages */
1163         if (buffer->pages < 2)
1164                 buffer->pages = 2;
1165 
1166         /*
1167          * Without CPU hotplug, if the ring buffer is allocated in an
1168          * early initcall, it will not be notified of secondary cpus.
1169          * In that case, we need to allocate for all possible cpus.
1170          */
1171 #ifdef CONFIG_HOTPLUG_CPU
1172         get_online_cpus();
1173         cpumask_copy(buffer->cpumask, cpu_online_mask);
1174 #else
1175         cpumask_copy(buffer->cpumask, cpu_possible_mask);
1176 #endif
1177         buffer->cpus = nr_cpu_ids;
1178 
1179         bsize = sizeof(void *) * nr_cpu_ids;
1180         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1181                                   GFP_KERNEL);
1182         if (!buffer->buffers)
1183                 goto fail_free_cpumask;
1184 
1185         for_each_buffer_cpu(buffer, cpu) {
1186                 buffer->buffers[cpu] =
1187                         rb_allocate_cpu_buffer(buffer, cpu);
1188                 if (!buffer->buffers[cpu])
1189                         goto fail_free_buffers;
1190         }
1191 
1192 #ifdef CONFIG_HOTPLUG_CPU
1193         buffer->cpu_notify.notifier_call = rb_cpu_notify;
1194         buffer->cpu_notify.priority = 0;
1195         register_cpu_notifier(&buffer->cpu_notify);
1196 #endif
1197 
1198         put_online_cpus();
1199         mutex_init(&buffer->mutex);
1200 
1201         return buffer;
1202 
1203  fail_free_buffers:
1204         for_each_buffer_cpu(buffer, cpu) {
1205                 if (buffer->buffers[cpu])
1206                         rb_free_cpu_buffer(buffer->buffers[cpu]);
1207         }
1208         kfree(buffer->buffers);
1209 
1210  fail_free_cpumask:
1211         free_cpumask_var(buffer->cpumask);
1212         put_online_cpus();
1213 
1214  fail_free_buffer:
1215         kfree(buffer);
1216         return NULL;
1217 }
1218 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1219 
1220 /**
1221  * ring_buffer_free - free a ring buffer.
1222  * @buffer: the buffer to free.
1223  */
1224 void
1225 ring_buffer_free(struct ring_buffer *buffer)
1226 {
1227         int cpu;
1228 
1229         get_online_cpus();
1230 
1231 #ifdef CONFIG_HOTPLUG_CPU
1232         unregister_cpu_notifier(&buffer->cpu_notify);
1233 #endif
1234 
1235         for_each_buffer_cpu(buffer, cpu)
1236                 rb_free_cpu_buffer(buffer->buffers[cpu]);
1237 
1238         put_online_cpus();
1239 
1240         kfree(buffer->buffers);
1241         free_cpumask_var(buffer->cpumask);
1242 
1243         kfree(buffer);
1244 }
1245 EXPORT_SYMBOL_GPL(ring_buffer_free);
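
/*
 * Illustrative sketch (not part of the original file): allocating and
 * freeing a buffer. ring_buffer_alloc() is assumed to be the wrapper in
 * <linux/ring_buffer.h> that supplies the lock class key to
 * __ring_buffer_alloc(); the size and names below are only examples.
 */
static struct ring_buffer *example_buffer_setup(void)
{
        /* roughly 1MB per cpu, rounded up to whole pages; overwrite on wrap */
        return ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
}

static void example_buffer_teardown(struct ring_buffer *buffer)
{
        if (buffer)
                ring_buffer_free(buffer);
}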
1246 
1247 void ring_buffer_set_clock(struct ring_buffer *buffer,
1248                            u64 (*clock)(void))
1249 {
1250         buffer->clock = clock;
1251 }
1252 
1253 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1254 
1255 static void
1256 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1257 {
1258         struct buffer_page *bpage;
1259         struct list_head *p;
1260         unsigned i;
1261 
1262         spin_lock_irq(&cpu_buffer->reader_lock);
1263         rb_head_page_deactivate(cpu_buffer);
1264 
1265         for (i = 0; i < nr_pages; i++) {
1266                 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1267                         goto out;
1268                 p = cpu_buffer->pages->next;
1269                 bpage = list_entry(p, struct buffer_page, list);
1270                 list_del_init(&bpage->list);
1271                 free_buffer_page(bpage);
1272         }
1273         if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1274                 goto out;
1275 
1276         rb_reset_cpu(cpu_buffer);
1277         rb_check_pages(cpu_buffer);
1278 
1279 out:
1280         spin_unlock_irq(&cpu_buffer->reader_lock);
1281 }
1282 
1283 static void
1284 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1285                 struct list_head *pages, unsigned nr_pages)
1286 {
1287         struct buffer_page *bpage;
1288         struct list_head *p;
1289         unsigned i;
1290 
1291         spin_lock_irq(&cpu_buffer->reader_lock);
1292         rb_head_page_deactivate(cpu_buffer);
1293 
1294         for (i = 0; i < nr_pages; i++) {
1295                 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1296                         goto out;
1297                 p = pages->next;
1298                 bpage = list_entry(p, struct buffer_page, list);
1299                 list_del_init(&bpage->list);
1300                 list_add_tail(&bpage->list, cpu_buffer->pages);
1301         }
1302         rb_reset_cpu(cpu_buffer);
1303         rb_check_pages(cpu_buffer);
1304 
1305 out:
1306         spin_unlock_irq(&cpu_buffer->reader_lock);
1307 }
1308 
1309 /**
1310  * ring_buffer_resize - resize the ring buffer
1311  * @buffer: the buffer to resize.
1312  * @size: the new size.
1313  *
1314  * Minimum size is 2 * BUF_PAGE_SIZE.
1315  *
1316  * Returns -1 on failure.
1317  */
1318 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
1319 {
1320         struct ring_buffer_per_cpu *cpu_buffer;
1321         unsigned nr_pages, rm_pages, new_pages;
1322         struct buffer_page *bpage, *tmp;
1323         unsigned long buffer_size;
1324         LIST_HEAD(pages);
1325         int i, cpu;
1326 
1327         /*
1328          * Always succeed at resizing a non-existent buffer:
1329          */
1330         if (!buffer)
1331                 return size;
1332 
1333         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1334         size *= BUF_PAGE_SIZE;
1335         buffer_size = buffer->pages * BUF_PAGE_SIZE;
1336 
1337         /* we need a minimum of two pages */
1338         if (size < BUF_PAGE_SIZE * 2)
1339                 size = BUF_PAGE_SIZE * 2;
1340 
1341         if (size == buffer_size)
1342                 return size;
1343 
1344         atomic_inc(&buffer->record_disabled);
1345 
1346         /* Make sure all writers are done with this buffer. */
1347         synchronize_sched();
1348 
1349         mutex_lock(&buffer->mutex);
1350         get_online_cpus();
1351 
1352         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1353 
1354         if (size < buffer_size) {
1355 
1356                 /* easy case, just free pages */
1357                 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
1358                         goto out_fail;
1359 
1360                 rm_pages = buffer->pages - nr_pages;
1361 
1362                 for_each_buffer_cpu(buffer, cpu) {
1363                         cpu_buffer = buffer->buffers[cpu];
1364                         rb_remove_pages(cpu_buffer, rm_pages);
1365                 }
1366                 goto out;
1367         }
1368 
1369         /*
1370          * This is a bit more difficult. We only want to add pages
1371          * when we can allocate enough for all CPUs. We do this
1372          * by allocating all the pages and storing them on a local
1373          * linked list. If we succeed in our allocation, then we
1374          * add these pages to the cpu_buffers. Otherwise we just free
1375          * them all and return -ENOMEM.
1376          */
1377         if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
1378                 goto out_fail;
1379 
1380         new_pages = nr_pages - buffer->pages;
1381 
1382         for_each_buffer_cpu(buffer, cpu) {
1383                 for (i = 0; i < new_pages; i++) {
1384                         struct page *page;
1385                         /*
1386                          * The __GFP_NORETRY flag makes sure that the allocation
1387                          * fails gracefully without invoking the OOM killer and
1388                          * the system is not destabilized.
1389                          */
1390                         bpage = kzalloc_node(ALIGN(sizeof(*bpage),
1391                                                   cache_line_size()),
1392                                             GFP_KERNEL | __GFP_NORETRY,
1393                                             cpu_to_node(cpu));
1394                         if (!bpage)
1395                                 goto free_pages;
1396                         list_add(&bpage->list, &pages);
1397                         page = alloc_pages_node(cpu_to_node(cpu),
1398                                                 GFP_KERNEL | __GFP_NORETRY, 0);
1399                         if (!page)
1400                                 goto free_pages;
1401                         bpage->page = page_address(page);
1402                         rb_init_page(bpage->page);
1403                 }
1404         }
1405 
1406         for_each_buffer_cpu(buffer, cpu) {
1407                 cpu_buffer = buffer->buffers[cpu];
1408                 rb_insert_pages(cpu_buffer, &pages, new_pages);
1409         }
1410 
1411         if (RB_WARN_ON(buffer, !list_empty(&pages)))
1412                 goto out_fail;
1413 
1414  out:
1415         buffer->pages = nr_pages;
1416         put_online_cpus();
1417         mutex_unlock(&buffer->mutex);
1418 
1419         atomic_dec(&buffer->record_disabled);
1420 
1421         return size;
1422 
1423  free_pages:
1424         list_for_each_entry_safe(bpage, tmp, &pages, list) {
1425                 list_del_init(&bpage->list);
1426                 free_buffer_page(bpage);
1427         }
1428         put_online_cpus();
1429         mutex_unlock(&buffer->mutex);
1430         atomic_dec(&buffer->record_disabled);
1431         return -ENOMEM;
1432 
1433         /*
1434          * Something went totally wrong, and we are too paranoid
1435          * to even clean up the mess.
1436          */
1437  out_fail:
1438         put_online_cpus();
1439         mutex_unlock(&buffer->mutex);
1440         atomic_dec(&buffer->record_disabled);
1441         return -1;
1442 }
1443 EXPORT_SYMBOL_GPL(ring_buffer_resize);
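
/*
 * Illustrative sketch (not part of the original file): growing or shrinking
 * an existing buffer. The requested size is rounded to whole buffer pages
 * (and to at least two pages) for every per-cpu buffer; a negative return
 * value means the resize failed.
 */
static int example_buffer_resize(struct ring_buffer *buffer, unsigned long bytes)
{
        int ret = ring_buffer_resize(buffer, bytes);

        return ret < 0 ? ret : 0;
}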
1444 
1445 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1446 {
1447         mutex_lock(&buffer->mutex);
1448         if (val)
1449                 buffer->flags |= RB_FL_OVERWRITE;
1450         else
1451                 buffer->flags &= ~RB_FL_OVERWRITE;
1452         mutex_unlock(&buffer->mutex);
1453 }
1454 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1455 
1456 static inline void *
1457 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1458 {
1459         return bpage->data + index;
1460 }
1461 
1462 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1463 {
1464         return bpage->page->data + index;
1465 }
1466 
1467 static inline struct ring_buffer_event *
1468 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1469 {
1470         return __rb_page_index(cpu_buffer->reader_page,
1471                                cpu_buffer->reader_page->read);
1472 }
1473 
1474 static inline struct ring_buffer_event *
1475 rb_iter_head_event(struct ring_buffer_iter *iter)
1476 {
1477         return __rb_page_index(iter->head_page, iter->head);
1478 }
1479 
1480 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1481 {
1482         return local_read(&bpage->write) & RB_WRITE_MASK;
1483 }
1484 
1485 static inline unsigned rb_page_commit(struct buffer_page *bpage)
1486 {
1487         return local_read(&bpage->page->commit);
1488 }
1489 
1490 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1491 {
1492         return local_read(&bpage->entries) & RB_WRITE_MASK;
1493 }
1494 
1495 /* Size is determined by what has been committed */
1496 static inline unsigned rb_page_size(struct buffer_page *bpage)
1497 {
1498         return rb_page_commit(bpage);
1499 }
1500 
1501 static inline unsigned
1502 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1503 {
1504         return rb_page_commit(cpu_buffer->commit_page);
1505 }
1506 
1507 static inline unsigned
1508 rb_event_index(struct ring_buffer_event *event)
1509 {
1510         unsigned long addr = (unsigned long)event;
1511 
1512         return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1513 }
1514 
1515 static inline int
1516 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1517                    struct ring_buffer_event *event)
1518 {
1519         unsigned long addr = (unsigned long)event;
1520         unsigned long index;
1521 
1522         index = rb_event_index(event);
1523         addr &= PAGE_MASK;
1524 
1525         return cpu_buffer->commit_page->page == (void *)addr &&
1526                 rb_commit_index(cpu_buffer) == index;
1527 }
1528 
1529 static void
1530 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1531 {
1532         unsigned long max_count;
1533 
1534         /*
1535          * We only race with interrupts and NMIs on this CPU.
1536          * If we own the commit event, then we can commit
1537          * all others that interrupted us, since the interruptions
1538          * are in stack format (they finish before they come
1539          * back to us). This allows us to do a simple loop to
1540          * assign the commit to the tail.
1541          */
1542  again:
1543         max_count = cpu_buffer->buffer->pages * 100;
1544 
1545         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1546                 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1547                         return;
1548                 if (RB_WARN_ON(cpu_buffer,
1549                                rb_is_reader_page(cpu_buffer->tail_page)))
1550                         return;
1551                 local_set(&cpu_buffer->commit_page->page->commit,
1552                           rb_page_write(cpu_buffer->commit_page));
1553                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1554                 cpu_buffer->write_stamp =
1555                         cpu_buffer->commit_page->page->time_stamp;
1556                 /* add barrier to keep gcc from optimizing too much */
1557                 barrier();
1558         }
1559         while (rb_commit_index(cpu_buffer) !=
1560                rb_page_write(cpu_buffer->commit_page)) {
1561 
1562                 local_set(&cpu_buffer->commit_page->page->commit,
1563                           rb_page_write(cpu_buffer->commit_page));
1564                 RB_WARN_ON(cpu_buffer,
1565                            local_read(&cpu_buffer->commit_page->page->commit) &
1566                            ~RB_WRITE_MASK);
1567                 barrier();
1568         }
1569 
1570         /* again, keep gcc from optimizing */
1571         barrier();
1572 
1573         /*
1574          * If an interrupt came in just after the first while loop
1575          * and pushed the tail page forward, we will be left with
1576          * a dangling commit that will never go forward.
1577          */
1578         if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1579                 goto again;
1580 }
1581 
1582 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1583 {
1584         cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1585         cpu_buffer->reader_page->read = 0;
1586 }
1587 
1588 static void rb_inc_iter(struct ring_buffer_iter *iter)
1589 {
1590         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1591 
1592         /*
1593          * The iterator could be on the reader page (it starts there).
1594          * But the head could have moved, since the reader was
1595          * found. Check for this case and assign the iterator
1596          * to the head page instead of next.
1597          */
1598         if (iter->head_page == cpu_buffer->reader_page)
1599                 iter->head_page = rb_set_head_page(cpu_buffer);
1600         else
1601                 rb_inc_page(cpu_buffer, &iter->head_page);
1602 
1603         iter->read_stamp = iter->head_page->page->time_stamp;
1604         iter->head = 0;
1605 }
1606 
1607 /* Slow path, do not inline */
1608 static noinline struct ring_buffer_event *
1609 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1610 {
1611         event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1612 
1613         /* Not the first event on the page? */
1614         if (rb_event_index(event)) {
1615                 event->time_delta = delta & TS_MASK;
1616                 event->array[0] = delta >> TS_SHIFT;
1617         } else {
1618                 /* nope, just zero it */
1619                 event->time_delta = 0;
1620                 event->array[0] = 0;
1621         }
1622 
1623         return skip_time_extend(event);
1624 }
1625 
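/*
 * Worked example (illustrative, assuming TS_SHIFT is 27 as the 27-bit
 * time_delta in the entry header suggests): a delta of 1 << 30 does not
 * fit in an event's own time_delta field.  rb_add_time_stamp() stores
 * time_delta = delta & TS_MASK = 0 and array[0] = delta >> TS_SHIFT = 8,
 * and the read side (see rb_update_read_stamp() later in this file)
 * reconstructs the delta as (array[0] << TS_SHIFT) + time_delta = 1 << 30.
 */
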
1626 /**
1627  * ring_buffer_update_event - update event type and data
1628  * @event: the event to update
1629  * @type: the type of event
1630  * @length: the size of the event field in the ring buffer
1631  *
1632  * Update the type and data fields of the event. The length
1633  * is the actual size that is written to the ring buffer,
1634  * and with this, we can determine what to place into the
1635  * data field.
1636  */
1637 static void
1638 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1639                 struct ring_buffer_event *event, unsigned length,
1640                 int add_timestamp, u64 delta)
1641 {
1642         /* Only a commit updates the timestamp */
1643         if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1644                 delta = 0;
1645 
1646         /*
1647          * If we need to add a timestamp, then we
1648          * add it to the start of the reserved space.
1649          */
1650         if (unlikely(add_timestamp)) {
1651                 event = rb_add_time_stamp(event, delta);
1652                 length -= RB_LEN_TIME_EXTEND;
1653                 delta = 0;
1654         }
1655 
1656         event->time_delta = delta;
1657         length -= RB_EVNT_HDR_SIZE;
1658         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
1659                 event->type_len = 0;
1660                 event->array[0] = length;
1661         } else
1662                 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1663 }
1664 
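/*
 * Illustrative note on the encoding above: when the payload fits the
 * small-data case, its size is carried in type_len itself; assuming
 * RB_ALIGNMENT is 4, a type_len of 3 stands for at most 3 * 4 = 12
 * bytes of data.  Once the length (after the header is subtracted)
 * exceeds RB_MAX_SMALL_DATA, or RB_FORCE_8BYTE_ALIGNMENT is set,
 * type_len is 0 and the byte count lives in array[0] instead, which is
 * how the read side tells the two encodings apart.
 */
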
1665 /*
1666  * rb_handle_head_page - writer hit the head page
1667  *
1668  * Returns: +1 to retry page
1669  *           0 to continue
1670  *          -1 on error
1671  */
1672 static int
1673 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1674                     struct buffer_page *tail_page,
1675                     struct buffer_page *next_page)
1676 {
1677         struct buffer_page *new_head;
1678         int entries;
1679         int type;
1680         int ret;
1681 
1682         entries = rb_page_entries(next_page);
1683 
1684         /*
1685          * The hard part is here. We need to move the head
1686          * forward, and protect against both readers on
1687          * other CPUs and writers coming in via interrupts.
1688          */
1689         type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1690                                        RB_PAGE_HEAD);
1691 
1692         /*
1693          * type can be one of four:
1694          *  NORMAL - an interrupt already moved it for us
1695          *  HEAD   - we are the first to get here.
1696          *  UPDATE - we are the interrupt interrupting
1697          *           a current move.
1698          *  MOVED  - a reader on another CPU moved the next
1699          *           pointer to its reader page. Give up
1700          *           and try again.
1701          */
1702 
1703         switch (type) {
1704         case RB_PAGE_HEAD:
1705                 /*
1706                  * We changed the head to UPDATE, thus
1707                  * it is our responsibility to update
1708                  * the counters.
1709                  */
1710                 local_add(entries, &cpu_buffer->overrun);
1711 
1712                 /*
1713                  * The entries will be zeroed out when we move the
1714                  * tail page.
1715                  */
1716 
1717                 /* still more to do */
1718                 break;
1719 
1720         case RB_PAGE_UPDATE:
1721                 /*
1722                  * This is an interrupt that interrupted the
1723                  * previous update. Still more to do.
1724                  */
1725                 break;
1726         case RB_PAGE_NORMAL:
1727                 /*
1728                  * An interrupt came in before the update
1729                  * and processed this for us.
1730                  * Nothing left to do.
1731                  */
1732                 return 1;
1733         case RB_PAGE_MOVED:
1734                 /*
1735                  * The reader is on another CPU and just did
1736                  * a swap with our next_page.
1737                  * Try again.
1738                  */
1739                 return 1;
1740         default:
1741                 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1742                 return -1;
1743         }
1744 
1745         /*
1746          * Now that we are here, the old head pointer is
1747          * set to UPDATE. This will keep the reader from
1748          * swapping the head page with the reader page.
1749          * The reader (on another CPU) will spin till
1750          * we are finished.
1751          *
1752          * We just need to protect against interrupts
1753          * doing the job. We will set the next pointer
1754          * to HEAD. After that, we set the old pointer
1755          * to NORMAL, but only if it was HEAD before;
1756          * otherwise we are an interrupt, and only
1757          * want the outermost commit to reset it.
1758          */
1759         new_head = next_page;
1760         rb_inc_page(cpu_buffer, &new_head);
1761 
1762         ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1763                                     RB_PAGE_NORMAL);
1764 
1765         /*
1766          * Valid returns are:
1767          *  HEAD   - an interrupt came in and already set it.
1768          *  NORMAL - One of two things:
1769          *            1) We really set it.
1770          *            2) A bunch of interrupts came in and moved
1771          *               the page forward again.
1772          */
1773         switch (ret) {
1774         case RB_PAGE_HEAD:
1775         case RB_PAGE_NORMAL:
1776                 /* OK */
1777                 break;
1778         default:
1779                 RB_WARN_ON(cpu_buffer, 1);
1780                 return -1;
1781         }
1782 
1783         /*
1784          * It is possible that an interrupt came in,
1785          * set the head up, then more interrupts came in
1786          * and moved it again. When we get back here,
1787          * the page would have been set to NORMAL but we
1788          * just set it back to HEAD.
1789          *
1790          * How do you detect this? Well, if that happened
1791          * the tail page would have moved.
1792          */
1793         if (ret == RB_PAGE_NORMAL) {
1794                 /*
1795                  * If the tail had moved past next, then we need
1796                  * to reset the pointer.
1797                  */
1798                 if (cpu_buffer->tail_page != tail_page &&
1799                     cpu_buffer->tail_page != next_page)
1800                         rb_head_page_set_normal(cpu_buffer, new_head,
1801                                                 next_page,
1802                                                 RB_PAGE_HEAD);
1803         }
1804 
1805         /*
1806          * If this was the outermost commit (the one that
1807          * changed the original pointer from HEAD to UPDATE),
1808          * then it is up to us to reset it to NORMAL.
1809          */
1810         if (type == RB_PAGE_HEAD) {
1811                 ret = rb_head_page_set_normal(cpu_buffer, next_page,
1812                                               tail_page,
1813                                               RB_PAGE_UPDATE);
1814                 if (RB_WARN_ON(cpu_buffer,
1815                                ret != RB_PAGE_UPDATE))
1816                         return -1;
1817         }
1818 
1819         return 0;
1820 }
1821 
1822 static unsigned rb_calculate_event_length(unsigned length)
1823 {
1824         struct ring_buffer_event event; /* Used only for sizeof array */
1825 
1826         /* zero length can cause confusion */
1827         if (!length)
1828                 length = 1;
1829 
1830         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
1831                 length += sizeof(event.array[0]);
1832 
1833         length += RB_EVNT_HDR_SIZE;
1834         length = ALIGN(length, RB_ARCH_ALIGNMENT);
1835 
1836         return length;
1837 }
1838 
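/*
 * Worked example (illustrative, assuming RB_EVNT_HDR_SIZE is 4,
 * RB_ARCH_ALIGNMENT is 4 and RB_FORCE_8BYTE_ALIGNMENT is not set):
 * a request to reserve 2 bytes of data stays at or below
 * RB_MAX_SMALL_DATA, so no array[0] slot is added; 2 + 4 = 6 bytes,
 * rounded up to 8 bytes actually consumed on the page.
 */
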
1839 static inline void
1840 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1841               struct buffer_page *tail_page,
1842               unsigned long tail, unsigned long length)
1843 {
1844         struct ring_buffer_event *event;
1845 
1846         /*
1847          * Only the event that crossed the page boundary
1848          * must fill the old tail_page with padding.
1849          */
1850         if (tail >= BUF_PAGE_SIZE) {
1851                 /*
1852                  * If the page was filled, then we still need
1853                  * to update the real_end. Reset it to zero
1854                  * and the reader will ignore it.
1855                  */
1856                 if (tail == BUF_PAGE_SIZE)
1857                         tail_page->real_end = 0;
1858 
1859                 local_sub(length, &tail_page->write);
1860                 return;
1861         }
1862 
1863         event = __rb_page_index(tail_page, tail);
1864         kmemcheck_annotate_bitfield(event, bitfield);
1865 
1866         /*
1867          * Save the original length to the meta data.
1868          * This will be used by the reader to add to the
1869          * lost event counter.
1870          */
1871         tail_page->real_end = tail;
1872 
1873         /*
1874          * If this event is bigger than the minimum size, then
1875          * we need to be careful that we don't subtract the
1876          * write counter enough to allow another writer to slip
1877          * in on this page.
1878          * We put in a discarded commit instead, to make sure
1879          * that this space is not used again.
1880          *
1881          * If we are less than the minimum size, we don't need to
1882          * worry about it.
1883          */
1884         if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1885                 /* No room for any events */
1886 
1887                 /* Mark the rest of the page with padding */
1888                 rb_event_set_padding(event);
1889 
1890                 /* Set the write back to the previous setting */
1891                 local_sub(length, &tail_page->write);
1892                 return;
1893         }
1894 
1895         /* Put in a discarded event */
1896         event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1897         event->type_len = RINGBUF_TYPE_PADDING;
1898         /* time delta must be non zero */
1899         event->time_delta = 1;
1900 
1901         /* Set write to end of buffer */
1902         length = (tail + length) - BUF_PAGE_SIZE;
1903         local_sub(length, &tail_page->write);
1904 }
1905 
1906 /*
1907  * This is the slow path, force gcc not to inline it.
1908  */
1909 static noinline struct ring_buffer_event *
1910 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1911              unsigned long length, unsigned long tail,
1912              struct buffer_page *tail_page, u64 ts)
1913 {
1914         struct buffer_page *commit_page = cpu_buffer->commit_page;
1915         struct ring_buffer *buffer = cpu_buffer->buffer;
1916         struct buffer_page *next_page;
1917         int ret;
1918 
1919         next_page = tail_page;
1920 
1921         rb_inc_page(cpu_buffer, &next_page);
1922 
1923         /*
1924          * If for some reason, we had an interrupt storm that made
1925          * it all the way around the buffer, bail, and warn
1926          * about it.
1927          */
1928         if (unlikely(next_page == commit_page)) {
1929                 local_inc(&cpu_buffer->commit_overrun);
1930                 goto out_reset;
1931         }
1932 
1933         /*
1934          * This is where the fun begins!
1935          *
1936          * We are fighting against races between a reader that
1937          * could be on another CPU trying to swap its reader
1938          * page with the buffer head.
1939          *
1940          * We are also fighting against interrupts coming in and
1941          * moving the head or tail on us as well.
1942          *
1943          * If the next page is the head page then we have filled
1944          * the buffer, unless the commit page is still on the
1945          * reader page.
1946          */
1947         if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
1948 
1949                 /*
1950                  * If the commit is not on the reader page, then
1951                  * move the header page.
1952                  */
1953                 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1954                         /*
1955                          * If we are not in overwrite mode,
1956                          * this is easy, just stop here.
1957                          */
1958                         if (!(buffer->flags & RB_FL_OVERWRITE))
1959                                 goto out_reset;
1960 
1961                         ret = rb_handle_head_page(cpu_buffer,
1962                                                   tail_page,
1963                                                   next_page);
1964                         if (ret < 0)
1965                                 goto out_reset;
1966                         if (ret)
1967                                 goto out_again;
1968                 } else {
1969                         /*
1970                          * We need to be careful here too. The
1971                          * commit page could still be on the reader
1972                          * page. We could have a small buffer, and
1973                          * have filled up the buffer with events
1974                          * from interrupts and such, and wrapped.
1975                          *
1976                          * Note, if the tail page is also on the
1977                          * reader_page, we let it move out.
1978                          */
1979                         if (unlikely((cpu_buffer->commit_page !=
1980                                       cpu_buffer->tail_page) &&
1981                                      (cpu_buffer->commit_page ==
1982                                       cpu_buffer->reader_page))) {
1983                                 local_inc(&cpu_buffer->commit_overrun);
1984                                 goto out_reset;
1985                         }
1986                 }
1987         }
1988 
1989         ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
1990         if (ret) {
1991                 /*
1992                  * Nested commits always have zero deltas, so
1993                  * just reread the time stamp
1994                  */
1995                 ts = rb_time_stamp(buffer);
1996                 next_page->page->time_stamp = ts;
1997         }
1998 
1999  out_again:
2000 
2001         rb_reset_tail(cpu_buffer, tail_page, tail, length);
2002 
2003         /* fail and let the caller try again */
2004         return ERR_PTR(-EAGAIN);
2005 
2006  out_reset:
2007         /* reset write */
2008         rb_reset_tail(cpu_buffer, tail_page, tail, length);
2009 
2010         return NULL;
2011 }
2012 
2013 static struct ring_buffer_event *
2014 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2015                   unsigned long length, u64 ts,
2016                   u64 delta, int add_timestamp)
2017 {
2018         struct buffer_page *tail_page;
2019         struct ring_buffer_event *event;
2020         unsigned long tail, write;
2021 
2022         /*
2023          * If the time delta since the last event is too big to
2024          * hold in the time field of the event, then we append a
2025          * TIME EXTEND event ahead of the data event.
2026          */
2027         if (unlikely(add_timestamp))
2028                 length += RB_LEN_TIME_EXTEND;
2029 
2030         tail_page = cpu_buffer->tail_page;
2031         write = local_add_return(length, &tail_page->write);
2032 
2033         /* set write to only the index of the write */
2034         write &= RB_WRITE_MASK;
2035         tail = write - length;
2036 
2037         /* See if we shot past the end of this buffer page */
2038         if (unlikely(write > BUF_PAGE_SIZE))
2039                 return rb_move_tail(cpu_buffer, length, tail,
2040                                     tail_page, ts);
2041 
2042         /* We reserved something on the buffer */
2043 
2044         event = __rb_page_index(tail_page, tail);
2045         kmemcheck_annotate_bitfield(event, bitfield);
2046         rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2047 
2048         local_inc(&tail_page->entries);
2049 
2050         /*
2051          * If this is the first commit on the page, then update
2052          * its timestamp.
2053          */
2054         if (!tail)
2055                 tail_page->page->time_stamp = ts;
2056 
2057         return event;
2058 }
2059 
2060 static inline int
2061 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2062                   struct ring_buffer_event *event)
2063 {
2064         unsigned long new_index, old_index;
2065         struct buffer_page *bpage;
2066         unsigned long index;
2067         unsigned long addr;
2068 
2069         new_index = rb_event_index(event);
2070         old_index = new_index + rb_event_ts_length(event);
2071         addr = (unsigned long)event;
2072         addr &= PAGE_MASK;
2073 
2074         bpage = cpu_buffer->tail_page;
2075 
2076         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2077                 unsigned long write_mask =
2078                         local_read(&bpage->write) & ~RB_WRITE_MASK;
2079                 /*
2080                  * This is on the tail page. It is possible that
2081                  * a write could come in and move the tail page
2082                  * and write to the next page. That is fine
2083                  * because we just shorten what is on this page.
2084                  */
2085                 old_index += write_mask;
2086                 new_index += write_mask;
2087                 index = local_cmpxchg(&bpage->write, old_index, new_index);
2088                 if (index == old_index)
2089                         return 1;
2090         }
2091 
2092         /* could not discard */
2093         return 0;
2094 }
2095 
2096 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2097 {
2098         local_inc(&cpu_buffer->committing);
2099         local_inc(&cpu_buffer->commits);
2100 }
2101 
2102 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2103 {
2104         unsigned long commits;
2105 
2106         if (RB_WARN_ON(cpu_buffer,
2107                        !local_read(&cpu_buffer->committing)))
2108                 return;
2109 
2110  again:
2111         commits = local_read(&cpu_buffer->commits);
2112         /* synchronize with interrupts */
2113         barrier();
2114         if (local_read(&cpu_buffer->committing) == 1)
2115                 rb_set_commit_to_write(cpu_buffer);
2116 
2117         local_dec(&cpu_buffer->committing);
2118 
2119         /* synchronize with interrupts */
2120         barrier();
2121 
2122         /*
2123          * Need to account for interrupts coming in between the
2124          * updating of the commit page and the clearing of the
2125          * committing counter.
2126          */
2127         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2128             !local_read(&cpu_buffer->committing)) {
2129                 local_inc(&cpu_buffer->committing);
2130                 goto again;
2131         }
2132 }
2133 
2134 static struct ring_buffer_event *
2135 rb_reserve_next_event(struct ring_buffer *buffer,
2136                       struct ring_buffer_per_cpu *cpu_buffer,
2137                       unsigned long length)
2138 {
2139         struct ring_buffer_event *event;
2140         u64 ts, delta;
2141         int nr_loops = 0;
2142         int add_timestamp;
2143         u64 diff;
2144 
2145         rb_start_commit(cpu_buffer);
2146 
2147 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2148         /*
2149          * Due to the ability to swap a cpu buffer from a buffer
2150          * it is possible it was swapped before we committed.
2151          * (committing stops a swap). We check for it here and
2152          * if it happened, we have to fail the write.
2153          */
2154         barrier();
2155         if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2156                 local_dec(&cpu_buffer->committing);
2157                 local_dec(&cpu_buffer->commits);
2158                 return NULL;
2159         }
2160 #endif
2161 
2162         length = rb_calculate_event_length(length);
2163  again:
2164         add_timestamp = 0;
2165         delta = 0;
2166 
2167         /*
2168          * We allow for interrupts to reenter here and do a trace.
2169          * If one does, it will cause this original code to loop
2170          * back here. Even with heavy interrupts happening, this
2171          * should only happen a few times in a row. If this happens
2172          * 1000 times in a row, there must be either an interrupt
2173          * storm or we have something buggy.
2174          * Bail!
2175          */
2176         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2177                 goto out_fail;
2178 
2179         ts = rb_time_stamp(cpu_buffer->buffer);
2180         diff = ts - cpu_buffer->write_stamp;
2181 
2182         /* make sure this diff is calculated here */
2183         barrier();
2184 
2185         /* Did the write stamp get updated already? */
2186         if (likely(ts >= cpu_buffer->write_stamp)) {
2187                 delta = diff;
2188                 if (unlikely(test_time_stamp(delta))) {
2189                         int local_clock_stable = 1;
2190 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2191                         local_clock_stable = sched_clock_stable;
2192 #endif
2193                         WARN_ONCE(delta > (1ULL << 59),
2194                                   KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2195                                   (unsigned long long)delta,
2196                                   (unsigned long long)ts,
2197                                   (unsigned long long)cpu_buffer->write_stamp,
2198                                   local_clock_stable ? "" :
2199                                   "If you just came from a suspend/resume,\n"
2200                                   "please switch to the trace global clock:\n"
2201                                   "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2202                         add_timestamp = 1;
2203                 }
2204         }
2205 
2206         event = __rb_reserve_next(cpu_buffer, length, ts,
2207                                   delta, add_timestamp);
2208         if (unlikely(PTR_ERR(event) == -EAGAIN))
2209                 goto again;
2210 
2211         if (!event)
2212                 goto out_fail;
2213 
2214         return event;
2215 
2216  out_fail:
2217         rb_end_commit(cpu_buffer);
2218         return NULL;
2219 }
2220 
2221 #ifdef CONFIG_TRACING
2222 
2223 #define TRACE_RECURSIVE_DEPTH 16
2224 
2225 /* Keep this code out of the fast path cache */
2226 static noinline void trace_recursive_fail(void)
2227 {
2228         /* Disable all tracing before we do anything else */
2229         tracing_off_permanent();
2230 
2231         printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
2232                     "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2233                     trace_recursion_buffer(),
2234                     hardirq_count() >> HARDIRQ_SHIFT,
2235                     softirq_count() >> SOFTIRQ_SHIFT,
2236                     in_nmi());
2237 
2238         WARN_ON_ONCE(1);
2239 }
2240 
2241 static inline int trace_recursive_lock(void)
2242 {
2243         trace_recursion_inc();
2244 
2245         if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
2246                 return 0;
2247 
2248         trace_recursive_fail();
2249 
2250         return -1;
2251 }
2252 
2253 static inline void trace_recursive_unlock(void)
2254 {
2255         WARN_ON_ONCE(!trace_recursion_buffer());
2256 
2257         trace_recursion_dec();
2258 }
2259 
2260 #else
2261 
2262 #define trace_recursive_lock()          (0)
2263 #define trace_recursive_unlock()        do { } while (0)
2264 
2265 #endif
2266 
2267 /**
2268  * ring_buffer_lock_reserve - reserve a part of the buffer
2269  * @buffer: the ring buffer to reserve from
2270  * @length: the length of the data to reserve (excluding event header)
2271  *
2272  * Returns a reserved event on the ring buffer to copy directly to.
2273  * The user of this interface will need to get the body to write into
2274  * and can use the ring_buffer_event_data() interface.
2275  *
2276  * The length is the length of the data needed, not the event length
2277  * which also includes the event header.
2278  *
2279  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2280  * If NULL is returned, then nothing has been allocated or locked.
2281  */
2282 struct ring_buffer_event *
2283 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2284 {
2285         struct ring_buffer_per_cpu *cpu_buffer;
2286         struct ring_buffer_event *event;
2287         int cpu;
2288 
2289         if (ring_buffer_flags != RB_BUFFERS_ON)
2290                 return NULL;
2291 
2292         /* If we are tracing schedule, we don't want to recurse */
2293         preempt_disable_notrace();
2294 
2295         if (atomic_read(&buffer->record_disabled))
2296                 goto out_nocheck;
2297 
2298         if (trace_recursive_lock())
2299                 goto out_nocheck;
2300 
2301         cpu = raw_smp_processor_id();
2302 
2303         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2304                 goto out;
2305 
2306         cpu_buffer = buffer->buffers[cpu];
2307 
2308         if (atomic_read(&cpu_buffer->record_disabled))
2309                 goto out;
2310 
2311         if (length > BUF_MAX_DATA_SIZE)
2312                 goto out;
2313 
2314         event = rb_reserve_next_event(buffer, cpu_buffer, length);
2315         if (!event)
2316                 goto out;
2317 
2318         return event;
2319 
2320  out:
2321         trace_recursive_unlock();
2322 
2323  out_nocheck:
2324         preempt_enable_notrace();
2325         return NULL;
2326 }
2327 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2328 
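/*
 * Illustrative usage sketch; the function and payload below are
 * hypothetical, not part of the ring buffer API: reserve room for the
 * payload, fill it in through ring_buffer_event_data(), then commit
 * with ring_buffer_unlock_commit().
 */
static int example_record_value(struct ring_buffer *buffer, u64 value)
{
	struct ring_buffer_event *event;
	u64 *body;

	/* length is the payload size only; the event header is added for us */
	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;	/* recording disabled, recursion, or no room */

	body = ring_buffer_event_data(event);
	*body = value;

	return ring_buffer_unlock_commit(buffer, event);
}
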
2329 static void
2330 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2331                       struct ring_buffer_event *event)
2332 {
2333         u64 delta;
2334 
2335         /*
2336          * The event first in the commit queue updates the
2337          * time stamp.
2338          */
2339         if (rb_event_is_commit(cpu_buffer, event)) {
2340                 /*
2341                  * A commit event that is first on a page
2342                  * updates the write timestamp with the page stamp
2343                  */
2344                 if (!rb_event_index(event))
2345                         cpu_buffer->write_stamp =
2346                                 cpu_buffer->commit_page->page->time_stamp;
2347                 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2348                         delta = event->array[0];
2349                         delta <<= TS_SHIFT;
2350                         delta += event->time_delta;
2351                         cpu_buffer->write_stamp += delta;
2352                 } else
2353                         cpu_buffer->write_stamp += event->time_delta;
2354         }
2355 }
2356 
2357 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2358                       struct ring_buffer_event *event)
2359 {
2360         local_inc(&cpu_buffer->entries);
2361         rb_update_write_stamp(cpu_buffer, event);
2362         rb_end_commit(cpu_buffer);
2363 }
2364 
2365 /**
2366  * ring_buffer_unlock_commit - commit a reserved event
2367  * @buffer: The buffer to commit to
2368  * @event: The event pointer to commit.
2369  *
2370  * This commits the data to the ring buffer, and releases any locks held.
2371  *
2372  * Must be paired with ring_buffer_lock_reserve.
2373  */
2374 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2375                               struct ring_buffer_event *event)
2376 {
2377         struct ring_buffer_per_cpu *cpu_buffer;
2378         int cpu = raw_smp_processor_id();
2379 
2380         cpu_buffer = buffer->buffers[cpu];
2381 
2382         rb_commit(cpu_buffer, event);
2383 
2384         trace_recursive_unlock();
2385 
2386         preempt_enable_notrace();
2387 
2388         return 0;
2389 }
2390 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2391 
2392 static inline void rb_event_discard(struct ring_buffer_event *event)
2393 {
2394         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2395                 event = skip_time_extend(event);
2396 
2397         /* array[0] holds the actual length for the discarded event */
2398         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2399         event->type_len = RINGBUF_TYPE_PADDING;
2400         /* time delta must be non zero */
2401         if (!event->time_delta)
2402                 event->time_delta = 1;
2403 }
2404 
2405 /*
2406  * Decrement the entries to the page that an event is on.
2407  * The event does not even need to exist, only the pointer
2408  * to the page it is on. This may only be called before the commit
2409  * takes place.
2410  */
2411 static inline void
2412 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2413                    struct ring_buffer_event *event)
2414 {
2415         unsigned long addr = (unsigned long)event;
2416         struct buffer_page *bpage = cpu_buffer->commit_page;
2417         struct buffer_page *start;
2418 
2419         addr &= PAGE_MASK;
2420 
2421         /* Do the likely case first */
2422         if (likely(bpage->page == (void *)addr)) {
2423                 local_dec(&bpage->entries);
2424                 return;
2425         }
2426 
2427         /*
2428          * Because the commit page may be on the reader page we
2429          * start with the next page and check the end loop there.
2430          */
2431         rb_inc_page(cpu_buffer, &bpage);
2432         start = bpage;
2433         do {
2434                 if (bpage->page == (void *)addr) {
2435                         local_dec(&bpage->entries);
2436                         return;
2437                 }
2438                 rb_inc_page(cpu_buffer, &bpage);
2439         } while (bpage != start);
2440 
2441         /* commit not part of this buffer?? */
2442         RB_WARN_ON(cpu_buffer, 1);
2443 }
2444 
2445 /**
2446  * ring_buffer_commit_discard - discard an event that has not been committed
2447  * @buffer: the ring buffer
2448  * @event: non committed event to discard
2449  *
2450  * Sometimes an event that is in the ring buffer needs to be ignored.
2451  * This function lets the user discard an event in the ring buffer
2452  * so that it will not be read later.
2453  *
2454  * This function only works if it is called before the item has been
2455  * committed. It will try to free the event from the ring buffer
2456  * if another event has not been added behind it.
2457  *
2458  * If another event has been added behind it, it will set the event
2459  * up as discarded, and perform the commit.
2460  *
2461  * If this function is called, do not call ring_buffer_unlock_commit on
2462  * the event.
2463  */
2464 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2465                                 struct ring_buffer_event *event)
2466 {
2467         struct ring_buffer_per_cpu *cpu_buffer;
2468         int cpu;
2469 
2470         /* The event is discarded regardless */
2471         rb_event_discard(event);
2472 
2473         cpu = smp_processor_id();
2474         cpu_buffer = buffer->buffers[cpu];
2475 
2476         /*
2477          * This must only be called if the event has not been
2478          * committed yet. Thus we can assume that preemption
2479          * is still disabled.
2480          */
2481         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2482 
2483         rb_decrement_entry(cpu_buffer, event);
2484         if (rb_try_to_discard(cpu_buffer, event))
2485                 goto out;
2486 
2487         /*
2488          * The commit is still visible by the reader, so we
2489          * must still update the timestamp.
2490          */
2491         rb_update_write_stamp(cpu_buffer, event);
2492  out:
2493         rb_end_commit(cpu_buffer);
2494 
2495         trace_recursive_unlock();
2496 
2497         preempt_enable_notrace();
2498 
2499 }
2500 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
2501 
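/*
 * Illustrative sketch; the function below is hypothetical: a writer
 * that decides after reserving that the event is unwanted discards it
 * instead of committing, and must not call ring_buffer_unlock_commit()
 * afterwards.
 */
static int example_record_nonzero(struct ring_buffer *buffer, u64 value)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;

	body = ring_buffer_event_data(event);
	*body = value;

	if (!value) {
		/* filtered out: free or pad the reserved space and finish */
		ring_buffer_discard_commit(buffer, event);
		return 0;
	}

	return ring_buffer_unlock_commit(buffer, event);
}
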
2502 /**
2503  * ring_buffer_write - write data to the buffer without reserving
2504  * @buffer: The ring buffer to write to.
2505  * @length: The length of the data being written (excluding the event header)
2506  * @data: The data to write to the buffer.
2507  *
2508  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2509  * one function. If you already have the data to write to the buffer, it
2510  * may be easier to simply call this function.
2511  *
2512  * Note, like ring_buffer_lock_reserve, the length is the length of the data
2513  * and not the length of the event which would hold the header.
2514  */
2515 int ring_buffer_write(struct ring_buffer *buffer,
2516                         unsigned long length,
2517                         void *data)
2518 {
2519         struct ring_buffer_per_cpu *cpu_buffer;
2520         struct ring_buffer_event *event;
2521         void *body;
2522         int ret = -EBUSY;
2523         int cpu;
2524 
2525         if (ring_buffer_flags != RB_BUFFERS_ON)
2526                 return -EBUSY;
2527 
2528         preempt_disable_notrace();
2529 
2530         if (atomic_read(&buffer->record_disabled))
2531                 goto out;
2532 
2533         cpu = raw_smp_processor_id();
2534 
2535         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2536                 goto out;
2537 
2538         cpu_buffer = buffer->buffers[cpu];
2539 
2540         if (atomic_read(&cpu_buffer->record_disabled))
2541                 goto out;
2542 
2543         if (length > BUF_MAX_DATA_SIZE)
2544                 goto out;
2545 
2546         event = rb_reserve_next_event(buffer, cpu_buffer, length);
2547         if (!event)
2548                 goto out;
2549 
2550         body = rb_event_data(event);
2551 
2552         memcpy(body, data, length);
2553 
2554         rb_commit(cpu_buffer, event);
2555 
2556         ret = 0;
2557  out:
2558         preempt_enable_notrace();
2559 
2560         return ret;
2561 }
2562 EXPORT_SYMBOL_GPL(ring_buffer_write);
2563 
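/*
 * Illustrative sketch; the struct and function below are hypothetical:
 * when the data already exists, ring_buffer_write() replaces the
 * reserve/commit pair with a single call that copies the payload in.
 */
struct example_sample {
	u64	ts;
	int	cpu;
};

static int example_record_sample(struct ring_buffer *buffer)
{
	struct example_sample sample = {
		.ts	= trace_clock_local(),
		.cpu	= raw_smp_processor_id(),
	};

	/* returns 0 on success, -EBUSY if the write could not be done */
	return ring_buffer_write(buffer, sizeof(sample), &sample);
}
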
2564 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2565 {
2566         struct buffer_page *reader = cpu_buffer->reader_page;
2567         struct buffer_page *head = rb_set_head_page(cpu_buffer);
2568         struct buffer_page *commit = cpu_buffer->commit_page;
2569 
2570         /* In case of error, head will be NULL */
2571         if (unlikely(!head))
2572                 return 1;
2573 
2574         return reader->read == rb_page_commit(reader) &&
2575                 (commit == reader ||
2576                  (commit == head &&
2577                   head->read == rb_page_commit(commit)));
2578 }
2579 
2580 /**
2581  * ring_buffer_record_disable - stop all writes into the buffer
2582  * @buffer: The ring buffer to stop writes to.
2583  *
2584  * This prevents all writes to the buffer. Any attempt to write
2585  * to the buffer after this will fail and return NULL.
2586  *
2587  * The caller should call synchronize_sched() after this.
2588  */
2589 void ring_buffer_record_disable(struct ring_buffer *buffer)
2590 {
2591         atomic_inc(&buffer->record_disabled);
2592 }
2593 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
2594 
2595 /**
2596  * ring_buffer_record_enable - enable writes to the buffer
2597  * @buffer: The ring buffer to enable writes to
2598  *
2599  * Note, multiple disables will need the same number of enables
2600  * to truly enable the writing (much like preempt_disable).
2601  */
2602 void ring_buffer_record_enable(struct ring_buffer *buffer)
2603 {
2604         atomic_dec(&buffer->record_disabled);
2605 }
2606 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
2607 
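/*
 * Illustrative sketch; the function below is hypothetical: the
 * disable/enable pair nests like preempt_disable(), and as noted above
 * the caller should use synchronize_sched() so that writers already
 * inside the reserve/commit path have finished before the buffer is
 * inspected.
 */
static void example_quiesce_buffer(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	/* wait for writers that saw recording still enabled */
	synchronize_sched();

	/* ... read, reset or snapshot the buffer here ... */

	ring_buffer_record_enable(buffer);
}
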
2608 /**
2609  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2610  * @buffer: The ring buffer to stop writes to.
2611  * @cpu: The CPU buffer to stop
2612  *
2613  * This prevents all writes to the buffer. Any attempt to write
2614  * to the buffer after this will fail and return NULL.
2615  *
2616  * The caller should call synchronize_sched() after this.
2617  */
2618 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2619 {
2620         struct ring_buffer_per_cpu *cpu_buffer;
2621 
2622         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2623                 return;
2624 
2625         cpu_buffer = buffer->buffers[cpu];
2626         atomic_inc(&cpu_buffer->record_disabled);
2627 }
2628 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
2629 
2630 /**
2631  * ring_buffer_record_enable_cpu - enable writes to the buffer
2632  * @buffer: The ring buffer to enable writes to
2633  * @cpu: The CPU to enable.
2634  *
2635  * Note, multiple disables will need the same number of enables
2636  * to truly enable the writing (much like preempt_disable).
2637  */
2638 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2639 {
2640         struct ring_buffer_per_cpu *cpu_buffer;
2641 
2642         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2643                 return;
2644 
2645         cpu_buffer = buffer->buffers[cpu];
2646         atomic_dec(&cpu_buffer->record_disabled);
2647 }
2648 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
2649 
2650 /*
2651  * The total entries in the ring buffer is the running counter
2652  * of entries entered into the ring buffer, minus the sum of
2653  * the entries read from the ring buffer and the number of
2654  * entries that were overwritten.
2655  */
2656 static inline unsigned long
2657 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2658 {
2659         return local_read(&cpu_buffer->entries) -
2660                 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2661 }
2662 
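/*
 * Worked example (illustrative): if a CPU buffer has had 1000 events
 * written, 200 of them overwritten before being read and 300 consumed
 * by readers, then 1000 - (200 + 300) = 500 entries are still waiting
 * in the buffer.
 */
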
2663 /**
2664  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2665  * @buffer: The ring buffer
2666  * @cpu: The per CPU buffer to get the entries from.
2667  */
2668 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2669 {
2670         struct ring_buffer_per_cpu *cpu_buffer;
2671 
2672         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2673                 return 0;
2674 
2675         cpu_buffer = buffer->buffers[cpu];
2676 
2677         return rb_num_of_entries(cpu_buffer);
2678 }
2679 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
2680 
2681 /**
2682  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
2683  * @buffer: The ring buffer
2684  * @cpu: The per CPU buffer to get the number of overruns from
2685  */
2686 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
2687 {
2688         struct ring_buffer_per_cpu *cpu_buffer;
2689         unsigned long ret;
2690 
2691         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2692                 return 0;
2693 
2694         cpu_buffer = buffer->buffers[cpu];
2695         ret = local_read(&cpu_buffer->overrun);
2696 
2697         return ret;
2698 }
2699 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
2700 
2701 /**
2702  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2703  * @buffer: The ring buffer
2704  * @cpu: The per CPU buffer to get the number of overruns from
2705  */
2706 unsigned long
2707 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2708 {
2709         struct ring_buffer_per_cpu *cpu_buffer;
2710         unsigned long ret;
2711 
2712         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2713                 return 0;
2714 
2715         cpu_buffer = buffer->buffers[cpu];
2716         ret = local_read(&cpu_buffer->commit_overrun);
2717 
2718         return ret;
2719 }
2720 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2721 
2722 /**
2723  * ring_buffer_entries - get the number of entries in a buffer
2724  * @buffer: The ring buffer
2725  *
2726  * Returns the total number of entries in the ring buffer
2727  * (all CPU entries)
2728  */
2729 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2730 {
2731         struct ring_buffer_per_cpu *cpu_buffer;
2732         unsigned long entries = 0;
2733         int cpu;
2734 
2735         /* if you care about this being correct, lock the buffer */
2736         for_each_buffer_cpu(buffer, cpu) {
2737                 cpu_buffer = buffer->buffers[cpu];
2738                 entries += rb_num_of_entries(cpu_buffer);
2739         }
2740 
2741         return entries;
2742 }
2743 EXPORT_SYMBOL_GPL(ring_buffer_entries);
2744 
2745 /**
2746  * ring_buffer_overruns - get the number of overruns in buffer
2747  * @buffer: The ring buffer
2748  *
2749  * Returns the total number of overruns in the ring buffer
2750  * (all CPU entries)
2751  */
2752 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2753 {
2754         struct ring_buffer_per_cpu *cpu_buffer;
2755         unsigned long overruns = 0;
2756         int cpu;
2757 
2758         /* if you care about this being correct, lock the buffer */
2759         for_each_buffer_cpu(buffer, cpu) {
2760                 cpu_buffer = buffer->buffers[cpu];
2761                 overruns += local_read(&cpu_buffer->overrun);
2762         }
2763 
2764         return overruns;
2765 }
2766 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
2767 
2768 static void rb_iter_reset(struct ring_buffer_iter *iter)
2769 {
2770         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2771 
2772         /* Iterator usage is expected to have record disabled */
2773         if (list_empty(&cpu_buffer->reader_page->list)) {
2774                 iter->head_page = rb_set_head_page(cpu_buffer);
2775                 if (unlikely(!iter->head_page))
2776                         return;
2777                 iter->head = iter->head_page->read;
2778         } else {
2779                 iter->head_page = cpu_buffer->reader_page;
2780                 iter->head = cpu_buffer->reader_page->read;
2781         }
2782         if (iter->head)
2783                 iter->read_stamp = cpu_buffer->read_stamp;
2784         else
2785                 iter->read_stamp = iter->head_page->page->time_stamp;
2786         iter->cache_reader_page = cpu_buffer->reader_page;
2787         iter->cache_read = cpu_buffer->read;
2788 }
2789 
2790 /**
2791  * ring_buffer_iter_reset - reset an iterator
2792  * @iter: The iterator to reset
2793  *
2794  * Resets the iterator, so that it will start from the beginning
2795  * again.
2796  */
2797 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2798 {
2799         struct ring_buffer_per_cpu *cpu_buffer;
2800         unsigned long flags;
2801 
2802         if (!iter)
2803                 return;
2804 
2805         cpu_buffer = iter->cpu_buffer;
2806 
2807         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2808         rb_iter_reset(iter);
2809         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2810 }
2811 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2812 
2813 /**
2814  * ring_buffer_iter_empty - check if an iterator has no more to read
2815  * @iter: The iterator to check
2816  */
2817 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2818 {
2819         struct ring_buffer_per_cpu *cpu_buffer;
2820 
2821         cpu_buffer = iter->cpu_buffer;
2822 
2823         return iter->head_page == cpu_buffer->commit_page &&
2824                 iter->head == rb_commit_index(cpu_buffer);
2825 }
2826 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2827 
2828 static void
2829 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2830                      struct ring_buffer_event *event)
2831 {
2832         u64 delta;
2833 
2834         switch (event->type_len) {
2835         case RINGBUF_TYPE_PADDING:
2836                 return;
2837 
2838         case RINGBUF_TYPE_TIME_EXTEND:
2839                 delta = event->array[0];
2840                 delta <<= TS_SHIFT;
2841                 delta += event->time_delta;
2842                 cpu_buffer->read_stamp += delta;
2843                 return;
2844 
2845         case RINGBUF_TYPE_TIME_STAMP:
2846                 /* FIXME: not implemented */
2847                 return;
2848 
2849         case RINGBUF_TYPE_DATA:
2850                 cpu_buffer->read_stamp += event->time_delta;
2851                 return;
2852 
2853         default:
2854                 BUG();
2855         }
2856         return;
2857 }
2858 
2859 static void
2860 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2861                           struct ring_buffer_event *event)
2862 {
2863         u64 delta;
2864 
2865         switch (event->type_len) {
2866         case RINGBUF_TYPE_PADDING:
2867                 return;
2868 
2869         case RINGBUF_TYPE_TIME_EXTEND:
2870                 delta = event->array[0];
2871                 delta <<= TS_SHIFT;
2872                 delta += event->time_delta;
2873                 iter->read_stamp += delta;
2874                 return;
2875 
2876         case RINGBUF_TYPE_TIME_STAMP:
2877                 /* FIXME: not implemented */
2878                 return;
2879 
2880         case RINGBUF_TYPE_DATA:
2881                 iter->read_stamp += event->time_delta;
2882                 return;
2883 
2884         default:
2885                 BUG();
2886         }
2887         return;
2888 }
2889 
2890 static struct buffer_page *
2891 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2892 {
2893         struct buffer_page *reader = NULL;
2894         unsigned long overwrite;
2895         unsigned long flags;
2896         int nr_loops = 0;
2897         int ret;
2898 
2899         local_irq_save(flags);
2900         arch_spin_lock(&cpu_buffer->lock);
2901 
2902  again:
2903         /*
2904          * This should normally only loop twice. But because the
2905          * start of the reader inserts an empty page, it causes
2906          * a case where we will loop three times. There should be no
2907          * reason to loop four times (that I know of).
2908          */
2909         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2910                 reader = NULL;
2911                 goto out;
2912         }
2913 
2914         reader = cpu_buffer->reader_page;
2915 
2916         /* If there's more to read, return this page */
2917         if (cpu_buffer->reader_page->read < rb_page_size(reader))
2918                 goto out;
2919 
2920         /* Never should we have an index greater than the size */
2921         if (RB_WARN_ON(cpu_buffer,
2922                        cpu_buffer->reader_page->read > rb_page_size(reader)))
2923                 goto out;
2924 
2925         /* check if we caught up to the tail */
2926         reader = NULL;
2927         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2928                 goto out;
2929 
2930         /*
2931          * Reset the reader page to size zero.
2932          */
2933         local_set(&cpu_buffer->reader_page->write, 0);
2934         local_set(&cpu_buffer->reader_page->entries, 0);
2935         local_set(&cpu_buffer->reader_page->page->commit, 0);
2936         cpu_buffer->reader_page->real_end = 0;
2937 
2938  spin:
2939         /*
2940          * Splice the empty reader page into the list around the head.
2941          */
2942         reader = rb_set_head_page(cpu_buffer);
2943         cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
2944         cpu_buffer->reader_page->list.prev = reader->list.prev;
2945 
2946         /*
2947          * cpu_buffer->pages just needs to point to the buffer, it
2948          *  has no specific buffer page to point to. Let's move it out
2949          *  of our way so we don't accidentally swap it.
2950          */
2951         cpu_buffer->pages = reader->list.prev;
2952 
2953         /* The reader page will be pointing to the new head */
2954         rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
2955 
2956         /*
2957          * We want to make sure we read the overruns after we set up our
2958          * pointers to the next object. The writer side does a
2959          * cmpxchg to cross pages which acts as the mb on the writer
2960          * side. Note, the reader will constantly fail the swap
2961          * while the writer is updating the pointers, so this
2962          * guarantees that the overwrite recorded here is the one we
2963          * want to compare with the last_overrun.
2964          */
2965         smp_mb();
2966         overwrite = local_read(&(cpu_buffer->overrun));
2967 
2968         /*
2969          * Here's the tricky part.
2970          *
2971          * We need to move the pointer past the header page.
2972          * But we can only do that if a writer is not currently
2973          * moving it. The page before the header page has the
2974          * flag bit '1' set if it is pointing to the page we want,
2975          * but if the writer is in the process of moving it
2976          * then it will be '2' or already moved '0'.
2977          */
2978 
2979         ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
2980 
2981         /*
2982          * If we did not convert it, then we must try again.
2983          */
2984         if (!ret)
2985                 goto spin;
2986 
2987         /*
2988          * Yeah! We succeeded in replacing the page.
2989          *
2990          * Now make the new head point back to the reader page.
2991          */
2992         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
2993         rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2994 
2995         /* Finally update the reader page to the new head */
2996         cpu_buffer->reader_page = reader;
2997         rb_reset_reader_page(cpu_buffer);
2998 
2999         if (overwrite != cpu_buffer->last_overrun) {
3000                 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3001                 cpu_buffer->last_overrun = overwrite;
3002         }
3003 
3004         goto again;
3005 
3006  out:
3007         arch_spin_unlock(&cpu_buffer->lock);
3008         local_irq_restore(flags);
3009 
3010         return reader;
3011 }
3012 
3013 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3014 {
3015         struct ring_buffer_event *event;
3016         struct buffer_page *reader;
3017         unsigned length;
3018 
3019         reader = rb_get_reader_page(cpu_buffer);
3020 
3021         /* This function should not be called when buffer is empty */
3022         if (RB_WARN_ON(cpu_buffer, !reader))
3023                 return;
3024 
3025         event = rb_reader_event(cpu_buffer);
3026 
3027         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3028                 cpu_buffer->read++;
3029 
3030         rb_update_read_stamp(cpu_buffer, event);
3031 
3032         length = rb_event_length(event);
3033         cpu_buffer->reader_page->read += length;
3034 }
3035 
3036 static void rb_advance_iter(struct ring_buffer_iter *iter)
3037 {
3038         struct ring_buffer_per_cpu *cpu_buffer;
3039         struct ring_buffer_event *event;
3040         unsigned length;
3041 
3042         cpu_buffer = iter->cpu_buffer;
3043 
3044         /*
3045          * Check if we are at the end of the buffer.
3046          */
3047         if (iter->head >= rb_page_size(iter->head_page)) {
3048                 /* discarded commits can make the page empty */
3049                 if (iter->head_page == cpu_buffer->commit_page)
3050                         return;
3051                 rb_inc_iter(iter);
3052                 return;
3053         }
3054 
3055         event = rb_iter_head_event(iter);
3056 
3057         length = rb_event_length(event);
3058 
3059         /*
3060          * This should not be called to advance the header if we are
3061          * at the tail of the buffer.
3062          */
3063         if (RB_WARN_ON(cpu_buffer,
3064                        (iter->head_page == cpu_buffer->commit_page) &&
3065                        (iter->head + length > rb_commit_index(cpu_buffer))))
3066                 return;
3067 
3068         rb_update_iter_read_stamp(iter, event);
3069 
3070         iter->head += length;
3071 
3072         /* check for end of page padding */
3073         if ((iter->head >= rb_page_size(iter->head_page)) &&
3074             (iter->head_page != cpu_buffer->commit_page))
3075                 rb_advance_iter(iter);
3076 }
3077 
3078 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3079 {
3080         return cpu_buffer->lost_events;
3081 }
3082 
3083 static struct ring_buffer_event *
3084 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3085                unsigned long *lost_events)
3086 {
3087         struct ring_buffer_event *event;
3088         struct buffer_page *reader;
3089         int nr_loops = 0;
3090 
3091  again:
3092         /*
3093          * We repeat when a time extend is encountered.
3094          * Since the time extend is always attached to a data event,
3095          * we should never loop more than once.
3096          * (We never hit the following condition more than twice).
3097          */
3098         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3099                 return NULL;
3100 
3101         reader = rb_get_reader_page(cpu_buffer);
3102         if (!reader)
3103                 return NULL;
3104 
3105         event = rb_reader_event(cpu_buffer);
3106 
3107         switch (event->type_len) {
3108         case RINGBUF_TYPE_PADDING:
3109                 if (rb_null_event(event))
3110                         RB_WARN_ON(cpu_buffer, 1);
3111                 /*
3112                  * Because the writer could be discarding every
3113                  * event it creates (which would probably be bad)
3114                  * if we were to go back to "again" then we may never
3115                  * catch up, and will trigger the warn on, or lock
3116                  * the box. Return the padding, and we will release
3117                  * the current locks, and try again.
3118                  */
3119                 return event;
3120 
3121         case RINGBUF_TYPE_TIME_EXTEND:
3122                 /* Internal data, OK to advance */
3123                 rb_advance_reader(cpu_buffer);
3124                 goto again;
3125 
3126         case RINGBUF_TYPE_TIME_STAMP:
3127                 /* FIXME: not implemented */
3128                 rb_advance_reader(cpu_buffer);
3129                 goto again;
3130 
3131         case RINGBUF_TYPE_DATA:
3132                 if (ts) {
3133                         *ts = cpu_buffer->read_stamp + event->time_delta;
3134                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3135                                                          cpu_buffer->cpu, ts);
3136                 }
3137                 if (lost_events)
3138                         *lost_events = rb_lost_events(cpu_buffer);
3139                 return event;
3140 
3141         default:
3142                 BUG();
3143         }
3144 
3145         return NULL;
3146 }
3147 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3148 
3149 static struct ring_buffer_event *
3150 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3151 {
3152         struct ring_buffer *buffer;
3153         struct ring_buffer_per_cpu *cpu_buffer;
3154         struct ring_buffer_event *event;
3155         int nr_loops = 0;
3156 
3157         cpu_buffer = iter->cpu_buffer;
3158         buffer = cpu_buffer->buffer;
3159 
3160         /*
3161          * Check if someone performed a consuming read to
3162          * the buffer. A consuming read invalidates the iterator
3163          * and we need to reset the iterator in this case.
3164          */
3165         if (unlikely(iter->cache_read != cpu_buffer->read ||
3166                      iter->cache_reader_page != cpu_buffer->reader_page))
3167                 rb_iter_reset(iter);
3168 
3169  again:
3170         if (ring_buffer_iter_empty(iter))
3171                 return NULL;
3172 
3173         /*
3174          * We repeat when a time extend is encountered.
3175          * Since the time extend is always attached to a data event,
3176          * we should never loop more than once.
3177          * (We never hit the following condition more than twice).
3178          */
3179         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3180                 return NULL;
3181 
3182         if (rb_per_cpu_empty(cpu_buffer))
3183                 return NULL;
3184 
3185         if (iter->head >= local_read(&iter->head_page->page->commit)) {
3186                 rb_inc_iter(iter);
3187                 goto again;
3188         }
3189 
3190         event = rb_iter_head_event(iter);
3191 
3192         switch (event->type_len) {
3193         case RINGBUF_TYPE_PADDING:
3194                 if (rb_null_event(event)) {
3195                         rb_inc_iter(iter);
3196                         goto again;
3197                 }
3198                 rb_advance_iter(iter);
3199                 return event;
3200 
3201         case RINGBUF_TYPE_TIME_EXTEND:
3202                 /* Internal data, OK to advance */
3203                 rb_advance_iter(iter);
3204                 goto again;
3205 
3206         case RINGBUF_TYPE_TIME_STAMP:
3207                 /* FIXME: not implemented */
3208                 rb_advance_iter(iter);
3209                 goto again;
3210 
3211         case RINGBUF_TYPE_DATA:
3212                 if (ts) {
3213                         *ts = iter->read_stamp + event->time_delta;
3214                         ring_buffer_normalize_time_stamp(buffer,
3215                                                          cpu_buffer->cpu, ts);
3216                 }
3217                 return event;
3218 
3219         default:
3220                 BUG();
3221         }
3222 
3223         return NULL;
3224 }
3225 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3226 
3227 static inline int rb_ok_to_lock(void)
3228 {
3229         /*
3230          * If an NMI die dumps out the content of the ring buffer
3231          * do not grab locks. We also permanently disable the ring
3232          * buffer. A one-time deal is all you get from reading
3233          * the ring buffer from an NMI.
3234          */
3235         if (likely(!in_nmi()))
3236                 return 1;
3237 
3238         tracing_off_permanent();
3239         return 0;
3240 }
3241 
3242 /**
3243  * ring_buffer_peek - peek at the next event to be read
3244  * @buffer: The ring buffer to read
3245  * @cpu: The cpu to peek at
3246  * @ts: The timestamp counter of this event.
3247  * @lost_events: a variable to store if events were lost (may be NULL)
3248  *
3249  * This will return the event that will be read next, but does
3250  * not consume the data.
3251  */
3252 struct ring_buffer_event *
3253 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3254                  unsigned long *lost_events)
3255 {
3256         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3257         struct ring_buffer_event *event;
3258         unsigned long flags;
3259         int dolock;
3260 
3261         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3262                 return NULL;
3263 
3264         dolock = rb_ok_to_lock();
3265  again:
3266         local_irq_save(flags);
3267         if (dolock)
3268                 spin_lock(&cpu_buffer->reader_lock);
3269         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3270         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3271                 rb_advance_reader(cpu_buffer);
3272         if (dolock)
3273                 spin_unlock(&cpu_buffer->reader_lock);
3274         local_irq_restore(flags);
3275 
3276         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3277                 goto again;
3278 
3279         return event;
3280 }
3281 
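/*
 * Editor's note: a minimal usage sketch for ring_buffer_peek(), under
 * the assumption of a hypothetical caller that already owns a valid
 * struct ring_buffer.  Peeking returns the next event for @cpu without
 * consuming it, so two successive calls with no intervening consume
 * see the same event.  example_peek() and its pr_info() output are
 * illustrative only.
 */
static void example_peek(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	u64 ts = 0;

	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
	if (event)
		pr_info("next event: %u bytes, ts=%llu, lost=%lu\n",
			ring_buffer_event_length(event),
			(unsigned long long)ts, lost);
}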
3282 /**
3283  * ring_buffer_iter_peek - peek at the next event to be read
3284  * @iter: The ring buffer iterator
3285  * @ts: The timestamp counter of this event.
3286  *
3287  * This will return the event that will be read next, but does
3288  * not increment the iterator.
3289  */
3290 struct ring_buffer_event *
3291 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3292 {
3293         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3294         struct ring_buffer_event *event;
3295         unsigned long flags;
3296 
3297  again:
3298         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3299         event = rb_iter_peek(iter, ts);
3300         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3301 
3302         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3303                 goto again;
3304 
3305         return event;
3306 }
3307 
3308 /**
3309  * ring_buffer_consume - return an event and consume it
3310  * @buffer: The ring buffer to get the next event from
3311  * @cpu: the cpu to read the buffer from
3312  * @ts: a variable to store the timestamp (may be NULL)
3313  * @lost_events: a variable to store if events were lost (may be NULL)
3314  *
3315  * Returns the next event in the ring buffer, and that event is consumed.
3316  * Meaning that sequential reads will keep returning a different event,
3317  * and eventually empty the ring buffer if the producer is slower.
3318  */
3319 struct ring_buffer_event *
3320 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3321                     unsigned long *lost_events)
3322 {
3323         struct ring_buffer_per_cpu *cpu_buffer;
3324         struct ring_buffer_event *event = NULL;
3325         unsigned long flags;
3326         int dolock;
3327 
3328         dolock = rb_ok_to_lock();
3329 
3330  again:
3331         /* might be called in atomic */
3332         preempt_disable();
3333 
3334         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3335                 goto out;
3336 
3337         cpu_buffer = buffer->buffers[cpu];
3338         local_irq_save(flags);
3339         if (dolock)
3340                 spin_lock(&cpu_buffer->reader_lock);
3341 
3342         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3343         if (event) {
3344                 cpu_buffer->lost_events = 0;
3345                 rb_advance_reader(cpu_buffer);
3346         }
3347 
3348         if (dolock)
3349                 spin_unlock(&cpu_buffer->reader_lock);
3350         local_irq_restore(flags);
3351 
3352  out:
3353         preempt_enable();
3354 
3355         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3356                 goto again;
3357 
3358         return event;
3359 }
3360 EXPORT_SYMBOL_GPL(ring_buffer_consume);
3361 
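/*
 * Editor's note: a sketch of a consuming read loop built on
 * ring_buffer_consume().  Each successful call hands back one event
 * and advances the reader, so looping until NULL drains the CPU
 * buffer as long as the producer is slower.  example_drain_cpu() is
 * an illustrative assumption, not part of this API.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		void *data = ring_buffer_event_data(event);

		/* 'lost' counts events dropped before this one */
		pr_debug("ts=%llu lost=%lu first byte=%02x\n",
			 (unsigned long long)ts, lost,
			 *(unsigned char *)data);
	}
}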
3362 /**
3363  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3364  * @buffer: The ring buffer to read from
3365  * @cpu: The cpu buffer to iterate over
3366  *
3367  * This performs the initial preparations necessary to iterate
3368  * through the buffer.  Memory is allocated, buffer recording
3369  * is disabled, and the iterator pointer is returned to the caller.
3370  *
3371  * Disabling buffer recording prevents the reading from being
3372  * corrupted. This is not a consuming read, so a producer is not
3373  * expected.
3374  *
3375  * After a sequence of ring_buffer_read_prepare calls, the user is
3376  * expected to make at least one call to ring_buffer_read_prepare_sync.
3377  * Afterwards, ring_buffer_read_start is invoked to get things going
3378  * for real.
3379  *
3380  * Overall, this must be paired with ring_buffer_read_finish.
3381  */
3382 struct ring_buffer_iter *
3383 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3384 {
3385         struct ring_buffer_per_cpu *cpu_buffer;
3386         struct ring_buffer_iter *iter;
3387 
3388         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3389                 return NULL;
3390 
3391         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3392         if (!iter)
3393                 return NULL;
3394 
3395         cpu_buffer = buffer->buffers[cpu];
3396 
3397         iter->cpu_buffer = cpu_buffer;
3398 
3399         atomic_inc(&cpu_buffer->record_disabled);
3400 
3401         return iter;
3402 }
3403 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3404 
3405 /**
3406  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3407  *
3408  * All previously invoked ring_buffer_read_prepare calls to prepare
3409  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
3410  * calls on those iterators are allowed.
3411  */
3412 void
3413 ring_buffer_read_prepare_sync(void)
3414 {
3415         synchronize_sched();
3416 }
3417 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3418 
3419 /**
3420  * ring_buffer_read_start - start a non consuming read of the buffer
3421  * @iter: The iterator returned by ring_buffer_read_prepare
3422  *
3423  * This finalizes the startup of an iteration through the buffer.
3424  * The iterator comes from a call to ring_buffer_read_prepare and
3425  * an intervening ring_buffer_read_prepare_sync must have been
3426  * performed.
3427  *
3428  * Must be paired with ring_buffer_read_finish.
3429  */
3430 void
3431 ring_buffer_read_start(struct ring_buffer_iter *iter)
3432 {
3433         struct ring_buffer_per_cpu *cpu_buffer;
3434         unsigned long flags;
3435 
3436         if (!iter)
3437                 return;
3438 
3439         cpu_buffer = iter->cpu_buffer;
3440 
3441         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3442         arch_spin_lock(&cpu_buffer->lock);
3443         rb_iter_reset(iter);
3444         arch_spin_unlock(&cpu_buffer->lock);
3445         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3446 }
3447 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3448 
3449 /**
3450  * ring_buffer_read_finish - finish reading the iterator of the buffer
3451  * @iter: The iterator retrieved by ring_buffer_read_prepare
3452  *
3453  * This re-enables the recording to the buffer, and frees the
3454  * iterator.
3455  */
3456 void
3457 ring_buffer_read_finish(struct ring_buffer_iter *iter)
3458 {
3459         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3460 
3461         atomic_dec(&cpu_buffer->record_disabled);
3462         kfree(iter);
3463 }
3464 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
3465 
3466 /**
3467  * ring_buffer_read - read the next item in the ring buffer by the iterator
3468  * @iter: The ring buffer iterator
3469  * @ts: The time stamp of the event read.
3470  *
3471  * This reads the next event in the ring buffer and increments the iterator.
3472  */
3473 struct ring_buffer_event *
3474 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3475 {
3476         struct ring_buffer_event *event;
3477         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3478         unsigned long flags;
3479 
3480         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3481  again:
3482         event = rb_iter_peek(iter, ts);
3483         if (!event)
3484                 goto out;
3485 
3486         if (event->type_len == RINGBUF_TYPE_PADDING)
3487                 goto again;
3488 
3489         rb_advance_iter(iter);
3490  out:
3491         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3492 
3493         return event;
3494 }
3495 EXPORT_SYMBOL_GPL(ring_buffer_read);
3496 
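/*
 * Editor's note: a sketch tying together the non-consuming read
 * sequence documented above: prepare an iterator, synchronize once,
 * start, read, and finish.  example_dump_cpu() and its pr_info()
 * output are illustrative assumptions; the ring_buffer_* calls are
 * the ones defined in this file.
 */
static void example_dump_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	if (!iter)
		return;

	/* one sync call covers all previously prepared iterators */
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)))
		pr_info("cpu%d: %u byte event at %llu\n", cpu,
			ring_buffer_event_length(event),
			(unsigned long long)ts);

	/* re-enables recording and frees the iterator */
	ring_buffer_read_finish(iter);
}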
3497 /**
3498  * ring_buffer_size - return the size of the ring buffer (in bytes)
3499  * @buffer: The ring buffer.
3500  */
3501 unsigned long ring_buffer_size(struct ring_buffer *buffer)
3502 {
3503         return BUF_PAGE_SIZE * buffer->pages;
3504 }
3505 EXPORT_SYMBOL_GPL(ring_buffer_size);
3506 
3507 static void
3508 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3509 {
3510         rb_head_page_deactivate(cpu_buffer);
3511 
3512         cpu_buffer->head_page
3513                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
3514         local_set(&cpu_buffer->head_page->write, 0);
3515         local_set(&cpu_buffer->head_page->entries, 0);
3516         local_set(&cpu_buffer->head_page->page->commit, 0);
3517 
3518         cpu_buffer->head_page->read = 0;
3519 
3520         cpu_buffer->tail_page = cpu_buffer->head_page;
3521         cpu_buffer->commit_page = cpu_buffer->head_page;
3522 
3523         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3524         local_set(&cpu_buffer->reader_page->write, 0);
3525         local_set(&cpu_buffer->reader_page->entries, 0);
3526         local_set(&cpu_buffer->reader_page->page->commit, 0);
3527         cpu_buffer->reader_page->read = 0;
3528 
3529         local_set(&cpu_buffer->commit_overrun, 0);
3530         local_set(&cpu_buffer->overrun, 0);
3531         local_set(&cpu_buffer->entries, 0);
3532         local_set(&cpu_buffer->committing, 0);
3533         local_set(&cpu_buffer->commits, 0);
3534         cpu_buffer->read = 0;
3535 
3536         cpu_buffer->write_stamp = 0;
3537         cpu_buffer->read_stamp = 0;
3538 
3539         cpu_buffer->lost_events = 0;
3540         cpu_buffer->last_overrun = 0;
3541 
3542         rb_head_page_activate(cpu_buffer);
3543 }
3544 
3545 /**
3546  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
3547  * @buffer: The ring buffer to reset a per cpu buffer of
3548  * @cpu: The CPU buffer to be reset
3549  */
3550 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3551 {
3552         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3553         unsigned long flags;
3554 
3555         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3556                 return;
3557 
3558         atomic_inc(&cpu_buffer->record_disabled);
3559 
3560         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3561 
3562         if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3563                 goto out;
3564 
3565         arch_spin_lock(&cpu_buffer->lock);
3566 
3567         rb_reset_cpu(cpu_buffer);
3568 
3569         arch_spin_unlock(&cpu_buffer->lock);
3570 
3571  out:
3572         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3573 
3574         atomic_dec(&cpu_buffer->record_disabled);
3575 }
3576 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
3577 
3578 /**
3579  * ring_buffer_reset - reset a ring buffer
3580  * @buffer: The ring buffer to reset all cpu buffers
3581  */
3582 void ring_buffer_reset(struct ring_buffer *buffer)
3583 {
3584         int cpu;
3585 
3586         for_each_buffer_cpu(buffer, cpu)
3587                 ring_buffer_reset_cpu(buffer, cpu);
3588 }
3589 EXPORT_SYMBOL_GPL(ring_buffer_reset);
3590 
3591 /**
3592  * ring_buffer_empty - is the ring buffer empty?
3593  * @buffer: The ring buffer to test
3594  */
3595 int ring_buffer_empty(struct ring_buffer *buffer)
3596 {
3597         struct ring_buffer_per_cpu *cpu_buffer;
3598         unsigned long flags;
3599         int dolock;
3600         int cpu;
3601         int ret;
3602 
3603         dolock = rb_ok_to_lock();
3604 
3605         /* yes this is racy, but if you don't like the race, lock the buffer */
3606         for_each_buffer_cpu(buffer, cpu) {
3607                 cpu_buffer = buffer->buffers[cpu];
3608                 local_irq_save(flags);
3609                 if (dolock)
3610                         spin_lock(&cpu_buffer->reader_lock);
3611                 ret = rb_per_cpu_empty(cpu_buffer);
3612                 if (dolock)
3613                         spin_unlock(&cpu_buffer->reader_lock);
3614                 local_irq_restore(flags);
3615 
3616                 if (!ret)
3617                         return 0;
3618         }
3619 
3620         return 1;
3621 }
3622 EXPORT_SYMBOL_GPL(ring_buffer_empty);
3623 
3624 /**
3625  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
3626  * @buffer: The ring buffer
3627  * @cpu: The CPU buffer to test
3628  */
3629 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
3630 {
3631         struct ring_buffer_per_cpu *cpu_buffer;
3632         unsigned long flags;
3633         int dolock;
3634         int ret;
3635 
3636         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3637                 return 1;
3638 
3639         dolock = rb_ok_to_lock();
3640 
3641         cpu_buffer = buffer->buffers[cpu];
3642         local_irq_save(flags);
3643         if (dolock)
3644                 spin_lock(&cpu_buffer->reader_lock);
3645         ret = rb_per_cpu_empty(cpu_buffer);
3646         if (dolock)
3647                 spin_unlock(&cpu_buffer->reader_lock);
3648         local_irq_restore(flags);
3649 
3650         return ret;
3651 }
3652 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
3653 
3654 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3655 /**
3656  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
3657  * @buffer_a: One buffer to swap with
3658  * @buffer_b: The other buffer to swap with
3659  *
3660  * This function is useful for tracers that want to take a "snapshot"
3661  * of a CPU buffer and have another backup buffer lying around.
3662  * It is expected that the tracer handles the cpu buffer not being
3663  * used at the moment.
3664  */
3665 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
3666                          struct ring_buffer *buffer_b, int cpu)
3667 {
3668         struct ring_buffer_per_cpu *cpu_buffer_a;
3669         struct ring_buffer_per_cpu *cpu_buffer_b;
3670         int ret = -EINVAL;
3671 
3672         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
3673             !cpumask_test_cpu(cpu, buffer_b->cpumask))
3674                 goto out;
3675 
3676         /* At least make sure the two buffers are somewhat the same */
3677         if (buffer_a->pages != buffer_b->pages)
3678                 goto out;
3679 
3680         ret = -EAGAIN;
3681 
3682         if (ring_buffer_flags != RB_BUFFERS_ON)
3683                 goto out;
3684 
3685         if (atomic_read(&buffer_a->record_disabled))
3686                 goto out;
3687 
3688         if (atomic_read(&buffer_b->record_disabled))
3689                 goto out;
3690 
3691         cpu_buffer_a = buffer_a->buffers[cpu];
3692         cpu_buffer_b = buffer_b->buffers[cpu];
3693 
3694         if (atomic_read(&cpu_buffer_a->record_disabled))
3695                 goto out;
3696 
3697         if (atomic_read(&cpu_buffer_b->record_disabled))
3698                 goto out;
3699 
3700         /*
3701          * We can't do a synchronize_sched here because this
3702          * function can be called in atomic context.
3703          * Normally this will be called from the same CPU as cpu.
3704          * If not it's up to the caller to protect this.
3705          */
3706         atomic_inc(&cpu_buffer_a->record_disabled);
3707         atomic_inc(&cpu_buffer_b->record_disabled);
3708 
3709         ret = -EBUSY;
3710         if (local_read(&cpu_buffer_a->committing))
3711                 goto out_dec;
3712         if (local_read(&cpu_buffer_b->committing))
3713                 goto out_dec;
3714 
3715         buffer_a->buffers[cpu] = cpu_buffer_b;
3716         buffer_b->buffers[cpu] = cpu_buffer_a;
3717 
3718         cpu_buffer_b->buffer = buffer_a;
3719         cpu_buffer_a->buffer = buffer_b;
3720 
3721         ret = 0;
3722 
3723 out_dec:
3724         atomic_dec(&cpu_buffer_a->record_disabled);
3725         atomic_dec(&cpu_buffer_b->record_disabled);
3726 out:
3727         return ret;
3728 }
3729 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
3730 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
3731 
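/*
 * Editor's note: a sketch of how a tracer might use
 * ring_buffer_swap_cpu() to snapshot one CPU: swap the live per-CPU
 * buffer with a spare buffer of the same size, then read the spare at
 * leisure.  Requires CONFIG_RING_BUFFER_ALLOW_SWAP; the caller and
 * the "spare" buffer are assumptions made for illustration.
 */
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
static int example_snapshot_cpu(struct ring_buffer *live,
				struct ring_buffer *spare, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret)
		return ret;	/* -EINVAL, -EAGAIN or -EBUSY */

	/*
	 * 'spare' now holds what 'live' held for this cpu and can be
	 * read with ring_buffer_consume() or an iterator.
	 */
	return 0;
}
#endif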
3732 /**
3733  * ring_buffer_alloc_read_page - allocate a page to read from buffer
3734  * @buffer: the buffer to allocate for.
3735  *
3736  * This function is used in conjunction with ring_buffer_read_page.
3737  * When reading a full page from the ring buffer, these functions
3738  * can be used to speed up the process. The calling function should
3739  * allocate a few pages first with this function. Then when it
3740  * needs to get pages from the ring buffer, it passes the result
3741  * of this function into ring_buffer_read_page, which will swap
3742  * the page that was allocated, with the read page of the buffer.
3743  *
3744  * Returns:
3745  *  The page allocated, or NULL on error.
3746  */
3747 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
3748 {
3749         struct buffer_data_page *bpage;
3750         struct page *page;
3751 
3752         page = alloc_pages_node(cpu_to_node(cpu),
3753                                 GFP_KERNEL | __GFP_NORETRY, 0);
3754         if (!page)
3755                 return NULL;
3756 
3757         bpage = page_address(page);
3758 
3759         rb_init_page(bpage);
3760 
3761         return bpage;
3762 }
3763 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
3764 
3765 /**
3766  * ring_buffer_free_read_page - free an allocated read page
3767  * @buffer: the buffer the page was allocated for
3768  * @data: the page to free
3769  *
3770  * Free a page allocated from ring_buffer_alloc_read_page.
3771  */
3772 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
3773 {
3774         free_page((unsigned long)data);
3775 }
3776 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
3777 
3778 /**
3779  * ring_buffer_read_page - extract a page from the ring buffer
3780  * @buffer: buffer to extract from
3781  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
3782  * @len: amount to extract
3783  * @cpu: the cpu of the buffer to extract
3784  * @full: should the extraction only happen when the page is full.
3785  *
3786  * This function will pull out a page from the ring buffer and consume it.
3787  * @data_page must be the address of the variable that was returned
3788  * from ring_buffer_alloc_read_page. This is because the page might be used
3789  * to swap with a page in the ring buffer.
3790  *
3791  * for example:
3792  *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
3793  *      if (!rpage)
3794  *              return error;
3795  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
3796  *      if (ret >= 0)
3797  *              process_page(rpage, ret);
3798  *
3799  * When @full is set, the read will fail (return a negative value)
3800  * unless the writer is off the reader page.
3801  *
3802  * Note: it is up to the calling functions to handle sleeps and wakeups.
3803  *  The ring buffer can be used anywhere in the kernel and can not
3804  *  blindly call wake_up. The layer that uses the ring buffer must be
3805  *  responsible for that.
3806  *
3807  * Returns:
3808  *  >=0 if data has been transferred, returns the offset of consumed data.
3809  *  <0 if no data has been transferred.
3810  */
3811 int ring_buffer_read_page(struct ring_buffer *buffer,
3812                           void **data_page, size_t len, int cpu, int full)
3813 {
3814         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3815         struct ring_buffer_event *event;
3816         struct buffer_data_page *bpage;
3817         struct buffer_page *reader;
3818         unsigned long missed_events;
3819         unsigned long flags;
3820         unsigned int commit;
3821         unsigned int read;
3822         u64 save_timestamp;
3823         int ret = -1;
3824 
3825         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3826                 goto out;
3827 
3828         /*
3829          * If len is not big enough to hold the page header, then
3830          * we can not copy anything.
3831          */
3832         if (len <= BUF_PAGE_HDR_SIZE)
3833                 goto out;
3834 
3835         len -= BUF_PAGE_HDR_SIZE;
3836 
3837         if (!data_page)
3838                 goto out;
3839 
3840         bpage = *data_page;
3841         if (!bpage)
3842                 goto out;
3843 
3844         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3845 
3846         reader = rb_get_reader_page(cpu_buffer);
3847         if (!reader)
3848                 goto out_unlock;
3849 
3850         event = rb_reader_event(cpu_buffer);
3851 
3852         read = reader->read;
3853         commit = rb_page_commit(reader);
3854 
3855         /* Check if any events were dropped */
3856         missed_events = cpu_buffer->lost_events;
3857 
3858         /*
3859          * If this page has been partially read or
3860          * if len is not big enough to read the rest of the page or
3861          * a writer is still on the page, then
3862          * we must copy the data from the page to the buffer.
3863          * Otherwise, we can simply swap the page with the one passed in.
3864          */
3865         if (read || (len < (commit - read)) ||
3866             cpu_buffer->reader_page == cpu_buffer->commit_page) {
3867                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
3868                 unsigned int rpos = read;
3869                 unsigned int pos = 0;
3870                 unsigned int size;
3871 
3872                 if (full)
3873                         goto out_unlock;
3874 
3875                 if (len > (commit - read))
3876                         len = (commit - read);
3877 
3878                 /* Always keep the time extend and data together */
3879                 size = rb_event_ts_length(event);
3880 
3881                 if (len < size)
3882                         goto out_unlock;
3883 
3884                 /* save the current timestamp, since the user will need it */
3885                 save_timestamp = cpu_buffer->read_stamp;
3886 
3887                 /* Need to copy one event at a time */
3888                 do {
3889                         /* We need the size of one event, because
3890                          * rb_advance_reader only advances by one event,
3891                          * whereas rb_event_ts_length may include the size of
3892                          * one or two events.
3893                          * We have already ensured there's enough space if this
3894                          * is a time extend. */
3895                         size = rb_event_length(event);
3896                         memcpy(bpage->data + pos, rpage->data + rpos, size);
3897 
3898                         len -= size;
3899 
3900                         rb_advance_reader(cpu_buffer);
3901                         rpos = reader->read;
3902                         pos += size;
3903 
3904                         if (rpos >= commit)
3905                                 break;
3906 
3907                         event = rb_reader_event(cpu_buffer);
3908                         /* Always keep the time extend and data together */
3909                         size = rb_event_ts_length(event);
3910                 } while (len >= size);
3911 
3912                 /* update bpage */
3913                 local_set(&bpage->commit, pos);
3914                 bpage->time_stamp = save_timestamp;
3915 
3916                 /* we copied everything to the beginning */
3917                 read = 0;
3918         } else {
3919                 /* update the entry counter */
3920                 cpu_buffer->read += rb_page_entries(reader);
3921 
3922                 /* swap the pages */
3923                 rb_init_page(bpage);
3924                 bpage = reader->page;
3925                 reader->page = *data_page;
3926                 local_set(&reader->write, 0);
3927                 local_set(&reader->entries, 0);
3928                 reader->read = 0;
3929                 *data_page = bpage;
3930 
3931                 /*
3932                  * Use the real_end for the data size,
3933                  * This gives us a chance to store the lost events
3934                  * on the page.
3935                  */
3936                 if (reader->real_end)
3937                         local_set(&bpage->commit, reader->real_end);
3938         }
3939         ret = read;
3940 
3941         cpu_buffer->lost_events = 0;
3942 
3943         commit = local_read(&bpage->commit);
3944         /*
3945          * Set a flag in the commit field if we lost events
3946          */
3947         if (missed_events) {
3948                 /* If there is room at the end of the page to save the
3949                  * missed events, then record it there.
3950                  */
3951                 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
3952                         memcpy(&bpage->data[commit], &missed_events,
3953                                sizeof(missed_events));
3954                         local_add(RB_MISSED_STORED, &bpage->commit);
3955                         commit += sizeof(missed_events);
3956                 }
3957                 local_add(RB_MISSED_EVENTS, &bpage->commit);
3958         }
3959 
3960         /*
3961          * This page may be off to user land. Zero it out here.
3962          */
3963         if (commit < BUF_PAGE_SIZE)
3964                 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
3965 
3966  out_unlock:
3967         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3968 
3969  out:
3970         return ret;
3971 }
3972 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
3973 
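/*
 * Editor's note: a sketch of the page-based read path documented
 * above.  Allocate a spare page, let ring_buffer_read_page() either
 * swap it with the reader page or copy events into it, hand the data
 * off, then free the page.  example_read_one_page() and its callback
 * are illustrative assumptions.
 */
static int example_read_one_page(struct ring_buffer *buffer, int cpu,
				 void (*handle)(void *page, int offset))
{
	void *rpage;
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (!rpage)
		return -ENOMEM;

	/* full == 0: accept a partially filled page as well */
	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
	if (ret >= 0)
		handle(rpage, ret);	/* ret is the offset of consumed data */

	ring_buffer_free_read_page(buffer, rpage);
	return ret < 0 ? ret : 0;
}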
3974 #ifdef CONFIG_TRACING
3975 static ssize_t
3976 rb_simple_read(struct file *filp, char __user *ubuf,
3977                size_t cnt, loff_t *ppos)
3978 {
3979         unsigned long *p = filp->private_data;
3980         char buf[64];
3981         int r;
3982 
3983         if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3984                 r = sprintf(buf, "permanently disabled\n");
3985         else
3986                 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3987 
3988         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3989 }
3990 
3991 static ssize_t
3992 rb_simple_write(struct file *filp, const char __user *ubuf,
3993                 size_t cnt, loff_t *ppos)
3994 {
3995         unsigned long *p = filp->private_data;
3996         unsigned long val;
3997         int ret;
3998 
3999         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4000         if (ret)
4001                 return ret;
4002 
4003         if (val)
4004                 set_bit(RB_BUFFERS_ON_BIT, p);
4005         else
4006                 clear_bit(RB_BUFFERS_ON_BIT, p);
4007 
4008         (*ppos)++;
4009 
4010         return cnt;
4011 }
4012 
4013 static const struct file_operations rb_simple_fops = {
4014         .open           = tracing_open_generic,
4015         .read           = rb_simple_read,
4016         .write          = rb_simple_write,
4017         .llseek         = default_llseek,
4018 };
4019 
4020 
4021 static __init int rb_init_debugfs(void)
4022 {
4023         struct dentry *d_tracer;
4024 
4025         d_tracer = tracing_init_dentry();
4026 
4027         trace_create_file("tracing_on", 0644, d_tracer,
4028                             &ring_buffer_flags, &rb_simple_fops);
4029 
4030         return 0;
4031 }
4032 
4033 fs_initcall(rb_init_debugfs);
4034 #endif
4035 
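/*
 * Editor's note: the debugfs file created above can be toggled from
 * user space.  A minimal user-space C sketch, assuming debugfs is
 * mounted at /sys/kernel/debug and that rb_init_debugfs() placed
 * "tracing_on" in the tracing directory; writing "1" sets
 * RB_BUFFERS_ON_BIT and writing "0" clears it, as rb_simple_write()
 * above shows.  This is not kernel code.
 */
#include <stdio.h>

static int set_ring_buffers_on(int on)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/tracing_on", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0);
	fclose(f);
	return 0;
}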
4036 #ifdef CONFIG_HOTPLUG_CPU
4037 static int rb_cpu_notify(struct notifier_block *self,
4038                          unsigned long action, void *hcpu)
4039 {
4040         struct ring_buffer *buffer =
4041                 container_of(self, struct ring_buffer, cpu_notify);
4042         long cpu = (long)hcpu;
4043 
4044         switch (action) {
4045         case CPU_UP_PREPARE:
4046         case CPU_UP_PREPARE_FROZEN:
4047                 if (cpumask_test_cpu(cpu, buffer->cpumask))
4048                         return NOTIFY_OK;
4049 
4050                 buffer->buffers[cpu] =
4051                         rb_allocate_cpu_buffer(buffer, cpu);
4052                 if (!buffer->buffers[cpu]) {
4053                         WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4054                              cpu);
4055                         return NOTIFY_OK;
4056                 }
4057                 smp_wmb();
4058                 cpumask_set_cpu(cpu, buffer->cpumask);
4059                 break;
4060         case CPU_DOWN_PREPARE:
4061         case CPU_DOWN_PREPARE_FROZEN:
4062                 /*
4063                  * Do nothing.
4064                  *  If we were to free the buffer, then the user would
4065                  *  lose any trace that was in the buffer.
4066                  */
4067                 break;
4068         default:
4069                 break;
4070         }
4071         return NOTIFY_OK;
4072 }
4073 #endif
4074 
