
Linux/arch/alpha/kernel/perf_event.c


// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware performance events for the Alpha.
 *
 * We implement HW counts on the EV67 and subsequent CPUs only.
 *
 * (C) 2010 Michael J. Cree
 *
 * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
 * ARM code, which are copyright by their respective authors.
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>

#include <asm/hwrpb.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>


/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1

/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
        int                     enabled;
        /* Number of events scheduled; also the number of entries valid in the arrays below. */
        int                     n_events;
        /* Number of events added since last hw_perf_disable(). */
        int                     n_added;
        /* Events currently scheduled. */
        struct perf_event       *event[MAX_HWEVENTS];
        /* Event type of each scheduled event. */
        unsigned long           evtype[MAX_HWEVENTS];
        /* Current index of each scheduled event; if not yet determined
         * contains PMC_NO_INDEX.
         */
        int                     current_idx[MAX_HWEVENTS];
        /* The active PMCs' config for easy use with wrperfmon(). */
        unsigned long           config;
        /* The active counters' indices for easy use with wrperfmon(). */
        unsigned long           idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);



/*
 * A structure to hold the description of the PMCs available on a particular
 * type of Alpha CPU.
 */
struct alpha_pmu_t {
        /* Mapping of the perf system hw event types to indigenous event types */
        const int *event_map;
        /* The number of entries in the event_map */
        int  max_events;
        /* The number of PMCs on this Alpha */
        int  num_pmcs;
        /*
         * All PMC counters reside in the IBOX register PCTR.  This is the
         * bit position of each counter's LSB within PCTR.
         */
        int  pmc_count_shift[MAX_HWEVENTS];
        /*
         * The mask that isolates the PMC bits when the LSB of the counter
         * is shifted to bit 0.
         */
        unsigned long pmc_count_mask[MAX_HWEVENTS];
        /* The maximum period the PMC can count. */
        unsigned long pmc_max_period[MAX_HWEVENTS];
        /*
         * The maximum value that may be written to the counter due to
         * hardware restrictions is pmc_max_period - pmc_left.
         */
        long pmc_left[MAX_HWEVENTS];
        /* Subroutine for allocation of PMCs.  Enforces constraints. */
        int (*check_constraints)(struct perf_event **, unsigned long *, int);
        /* Subroutine for checking validity of a raw event for this PMU. */
        int (*raw_event_valid)(u64 config);
};

/*
 * The Alpha CPU PMU description currently in operation.  This is set during
 * the boot process to the specific CPU of the machine.
 */
static const struct alpha_pmu_t *alpha_pmu;


#define HW_OP_UNSUPPORTED -1

/*
 * The hardware descriptions of the EV67, EV68, EV69, EV7 and EV79 PMUs
 * follow. Since they are identical we refer to them collectively as the
 * EV67 henceforth.
 */

/*
 * EV67 PMC event types
 *
 * There is no one-to-one mapping of the possible hw event types to the
 * actual codes that are used to program the PMCs, hence we introduce our
 * own hw event type identifiers.
 */
enum ev67_pmc_event_type {
        EV67_CYCLES = 1,
        EV67_INSTRUCTIONS,
        EV67_BCACHEMISS,
        EV67_MBOXREPLAY,
        EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)


/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES]       = EV67_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]     = EV67_INSTRUCTIONS,
        [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_CACHE_MISSES]     = EV67_BCACHEMISS,
};

struct ev67_mapping_t {
        int config;
        int idx;
};

/*
 * The mapping used when only one event is scheduled - entries must be in
 * the same order as the enum ev67_pmc_event_type definition.
 */
static const struct ev67_mapping_t ev67_mapping[] = {
        {EV67_PCTR_INSTR_CYCLES, 1},     /* EV67_CYCLES */
        {EV67_PCTR_INSTR_CYCLES, 0},     /* EV67_INSTRUCTIONS */
        {EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
        {EV67_PCTR_CYCLES_MBOX, 1}       /* EV67_MBOXREPLAY */
};


/*
 * Check that a group of events can be simultaneously scheduled on to the
 * EV67 PMU.  Also allocate counter indices and config.
 */
static int ev67_check_constraints(struct perf_event **event,
                                unsigned long *evtype, int n_ev)
{
        int idx0;
        unsigned long config;

        idx0 = ev67_mapping[evtype[0]-1].idx;
        config = ev67_mapping[evtype[0]-1].config;
        if (n_ev == 1)
                goto success;

        BUG_ON(n_ev != 2);

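        /*
         * From here on, idx0 is the PMC index that event[0] will be
         * assigned; event[1] gets the other PMC (idx0 ^ 1) at the success
         * label below.
         */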
        if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
                /* MBOX replay traps must be on PMC 1 */
                idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
                /* Only cycles can accompany MBOX replay traps */
                if (evtype[idx0] == EV67_CYCLES) {
                        config = EV67_PCTR_CYCLES_MBOX;
                        goto success;
                }
        }

        if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
                /* Bcache misses must be on PMC 1 */
                idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
                /* Only instructions can accompany Bcache misses */
                if (evtype[idx0] == EV67_INSTRUCTIONS) {
                        config = EV67_PCTR_INSTR_BCACHEMISS;
                        goto success;
                }
        }

        if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
                /* Instructions must be on PMC 0 */
                idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
                /* By this point only cycles can accompany instructions */
                if (evtype[idx0^1] == EV67_CYCLES) {
                        config = EV67_PCTR_INSTR_CYCLES;
                        goto success;
                }
        }

        /* Otherwise, darn it, there is a conflict.  */
        return -1;

success:
        event[0]->hw.idx = idx0;
        event[0]->hw.config_base = config;
        if (n_ev == 2) {
                event[1]->hw.idx = idx0 ^ 1;
                event[1]->hw.config_base = config;
        }
        return 0;
}


static int ev67_raw_event_valid(u64 config)
{
        return config >= EV67_CYCLES && config < EV67_LAST_ET;
}


static const struct alpha_pmu_t ev67_pmu = {
        .event_map = ev67_perfmon_event_map,
        .max_events = ARRAY_SIZE(ev67_perfmon_event_map),
        .num_pmcs = 2,
        .pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
        .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK,  EV67_PCTR_1_COUNT_MASK,  0},
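        /* PCTR0 and PCTR1 are 20-bit counters, hence the (1UL<<20) - 1 period. */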
        .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
        .pmc_left = {16, 4, 0},
        .check_constraints = ev67_check_constraints,
        .raw_event_valid = ev67_raw_event_valid,
};



/*
 * Helper routines to ensure that we read/write only the correct PMC bits
 * when calling the wrperfmon PALcall.
 */
static inline void alpha_write_pmc(int idx, unsigned long val)
{
        val &= alpha_pmu->pmc_count_mask[idx];
        val <<= alpha_pmu->pmc_count_shift[idx];
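        /* Bit idx of the argument appears to select which PMC the PAL
         * write updates.
         */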
        val |= (1<<idx);
        wrperfmon(PERFMON_CMD_WRITE, val);
}

static inline unsigned long alpha_read_pmc(int idx)
{
        unsigned long val;

        val = wrperfmon(PERFMON_CMD_READ, 0);
        val >>= alpha_pmu->pmc_count_shift[idx];
        val &= alpha_pmu->pmc_count_mask[idx];
        return val;
}

/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
                                struct hw_perf_event *hwc, int idx)
{
        long left = local64_read(&hwc->period_left);
        long period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        /*
         * Hardware restrictions require that the counters must not be
         * written with values that are too close to the maximum period.
         */
        if (unlikely(left < alpha_pmu->pmc_left[idx]))
                left = alpha_pmu->pmc_left[idx];

        if (left > (long)alpha_pmu->pmc_max_period[idx])
                left = alpha_pmu->pmc_max_period[idx];

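        /*
         * The PMC counts up and interrupts on overflow, so writing -left
         * (truncated to the counter width) yields an interrupt after 'left'
         * more events.
         */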
        local64_set(&hwc->prev_count, (unsigned long)(-left));

        alpha_write_pmc(idx, (unsigned long)(-left));

        perf_event_update_userpage(event);

        return ret;
}


/*
 * Calculates the count (the 'delta') since the last time the PMC was read.
 *
 * As the PMCs' full period can easily be exceeded within the perf system
 * sampling period we cannot use any high order bits as a guard bit in the
 * PMCs to detect overflow as is done by other architectures.  The code here
 * calculates the delta on the basis that there is no overflow when ovf is
 * zero.  The value passed via ovf by the interrupt handler corrects for
 * overflow.
 *
 * This can be racy on rare occasions -- a call to this routine can occur
 * with an overflowed counter just before the PMI service routine is called.
 * The check for delta negative hopefully always rectifies this situation.
 */
static unsigned long alpha_perf_event_update(struct perf_event *event,
                                        struct hw_perf_event *hwc, int idx, long ovf)
{
        long prev_raw_count, new_raw_count;
        long delta;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = alpha_read_pmc(idx);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

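        /*
         * prev_count was written as (unsigned long)(-left) and so can have
         * high bits set; mask it to the counter width before differencing.
         */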
        delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

        /* It is possible on very rare occasions that the PMC has overflowed
         * but the interrupt is yet to come.  Detect and fix this situation.
         */
        if (unlikely(delta < 0))
                delta += alpha_pmu->pmc_max_period[idx] + 1;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}


/*
 * Collect all HW events into the array event[].
 */
static int collect_events(struct perf_event *group, int max_count,
                          struct perf_event *event[], unsigned long *evtype,
                          int *current_idx)
{
        struct perf_event *pe;
        int n = 0;

        if (!is_software_event(group)) {
                if (n >= max_count)
                        return -1;
                event[n] = group;
                evtype[n] = group->hw.event_base;
                current_idx[n++] = PMC_NO_INDEX;
        }
        for_each_sibling_event(pe, group) {
                if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        event[n] = pe;
                        evtype[n] = pe->hw.event_base;
                        current_idx[n++] = PMC_NO_INDEX;
                }
        }
        return n;
}



/*
 * Check that a group of events can be simultaneously scheduled on to the PMU.
 */
static int alpha_check_constraints(struct perf_event **events,
                                   unsigned long *evtypes, int n_ev)
{
        /* Scheduling no HW events is possible from hw_perf_group_sched_in(). */
        if (n_ev == 0)
                return 0;

        if (n_ev > alpha_pmu->num_pmcs)
                return -1;

        return alpha_pmu->check_constraints(events, evtypes, n_ev);
}


/*
 * If new events have been scheduled then update cpuc with the new
 * configuration.  This may involve shifting cycle counts from one PMC to
 * another.
 */
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
        int j;

        if (cpuc->n_added == 0)
                return;

        /* Find counters that are moving to another PMC and update */
        for (j = 0; j < cpuc->n_events; j++) {
                struct perf_event *pe = cpuc->event[j];

                if (cpuc->current_idx[j] != PMC_NO_INDEX &&
                        cpuc->current_idx[j] != pe->hw.idx) {
                        alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
                        cpuc->current_idx[j] = PMC_NO_INDEX;
                }
        }

        /* Assign to counters all unassigned events. */
        cpuc->idx_mask = 0;
        for (j = 0; j < cpuc->n_events; j++) {
                struct perf_event *pe = cpuc->event[j];
                struct hw_perf_event *hwc = &pe->hw;
                int idx = hwc->idx;

                if (cpuc->current_idx[j] == PMC_NO_INDEX) {
                        alpha_perf_event_set_period(pe, hwc, idx);
                        cpuc->current_idx[j] = idx;
                }

                if (!(hwc->state & PERF_HES_STOPPED))
                        cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
        }
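        /*
         * check_constraints set the same config_base on every scheduled
         * event, so event[0]'s copy is the whole PMU configuration.
         */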
        cpuc->config = cpuc->event[0]->hw.config_base;
}



/* Schedule perf HW event on to PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static int alpha_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int n0;
        int ret;
        unsigned long irq_flags;

        /*
         * The Sparc code has the IRQ disable first followed by the perf
         * disable; however, this can lead to an overflowed counter with the
         * PMI disabled on rare occasions.  The alpha_perf_event_update()
         * routine should detect this situation by noting a negative delta;
         * nevertheless we disable the PMCs first to allow a potential
         * final PMI to occur before we disable interrupts.
         */
        perf_pmu_disable(event->pmu);
        local_irq_save(irq_flags);

        /* Default to the error to be returned. */
        ret = -EAGAIN;

        /* Insert the event on to the PMU; on success change ret to a valid return. */
        n0 = cpuc->n_events;
        if (n0 < alpha_pmu->num_pmcs) {
                cpuc->event[n0] = event;
                cpuc->evtype[n0] = event->hw.event_base;
                cpuc->current_idx[n0] = PMC_NO_INDEX;

                if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
                        cpuc->n_events++;
                        cpuc->n_added++;
                        ret = 0;
                }
        }

        hwc->state = PERF_HES_UPTODATE;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_STOPPED;

        local_irq_restore(irq_flags);
        perf_pmu_enable(event->pmu);

        return ret;
}



/* Remove a HW event from the PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static void alpha_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long irq_flags;
        int j;

        perf_pmu_disable(event->pmu);
        local_irq_save(irq_flags);

        for (j = 0; j < cpuc->n_events; j++) {
                if (event == cpuc->event[j]) {
                        int idx = cpuc->current_idx[j];

                        /* Shift remaining entries down into the existing
                         * slot.
                         */
                        while (++j < cpuc->n_events) {
                                cpuc->event[j - 1] = cpuc->event[j];
                                cpuc->evtype[j - 1] = cpuc->evtype[j];
                                cpuc->current_idx[j - 1] =
                                        cpuc->current_idx[j];
                        }

                        /* Absorb the final count and turn off the event. */
                        alpha_perf_event_update(event, hwc, idx, 0);
                        perf_event_update_userpage(event);

                        cpuc->idx_mask &= ~(1UL<<idx);
                        cpuc->n_events--;
                        break;
                }
        }

        local_irq_restore(irq_flags);
        perf_pmu_enable(event->pmu);
}


static void alpha_pmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        alpha_perf_event_update(event, hwc, hwc->idx, 0);
}


static void alpha_pmu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (!(hwc->state & PERF_HES_STOPPED)) {
                cpuc->idx_mask &= ~(1UL<<hwc->idx);
                hwc->state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                alpha_perf_event_update(event, hwc, hwc->idx, 0);
                hwc->state |= PERF_HES_UPTODATE;
        }

        if (cpuc->enabled)
                wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}


static void alpha_pmu_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                return;

        if (flags & PERF_EF_RELOAD) {
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
                alpha_perf_event_set_period(event, hwc, hwc->idx);
        }

        hwc->state = 0;

        cpuc->idx_mask |= 1UL<<hwc->idx;
        if (cpuc->enabled)
                wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}


/*
 * Check that CPU performance counters are supported.
 * - currently support EV67 and later CPUs.
 * - actually some later revisions of the EV6 have the same PMC model as the
 *     EV67 but we don't do sufficiently deep CPU detection to detect them.
 *     Bad luck to the very few people who might have one, I guess.
 */
static int supported_cpu(void)
{
        struct percpu_struct *cpu;
        unsigned long cputype;

        /* Get cpu type from HW */
        cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
        cputype = cpu->type & 0xffffffff;
        /* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
        return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}



static void hw_perf_event_destroy(struct perf_event *event)
{
        /* Nothing to be done! */
}



static int __hw_perf_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        struct perf_event *evts[MAX_HWEVENTS];
        unsigned long evtypes[MAX_HWEVENTS];
        int idx_rubbish_bin[MAX_HWEVENTS];
        int ev;
        int n;

        /* We only support a limited range of HARDWARE event types, with one
         * (the MBOX replay event) programmable only via a RAW event type.
         */
        if (attr->type == PERF_TYPE_HARDWARE) {
                if (attr->config >= alpha_pmu->max_events)
                        return -EINVAL;
                ev = alpha_pmu->event_map[attr->config];
        } else if (attr->type == PERF_TYPE_HW_CACHE) {
                return -EOPNOTSUPP;
        } else if (attr->type == PERF_TYPE_RAW) {
                if (!alpha_pmu->raw_event_valid(attr->config))
                        return -EINVAL;
                ev = attr->config;
        } else {
                return -EOPNOTSUPP;
        }

        if (ev < 0)
                return ev;

        /*
         * We place the event type in event_base here and leave calculation
         * of the codes to programme the PMU for alpha_pmu_enable() because
         * only then will we know what HW events are actually scheduled on
         * to the PMU.  At that point the code to programme the PMU is put
         * into config_base and the PMC to use is placed into idx.  We
         * initialise idx (below) to PMC_NO_INDEX to indicate that it is
         * yet to be determined.
         */
        hwc->event_base = ev;

        /* Collect events in a group together suitable for calling
         * alpha_check_constraints() to verify that the group as a whole can
         * be scheduled on to the PMU.
         */
        n = 0;
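        /* Leave one PMC slot free for this event itself, hence the
         * num_pmcs - 1 limit below.
         */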
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
                                alpha_pmu->num_pmcs - 1,
                                evts, evtypes, idx_rubbish_bin);
                if (n < 0)
                        return -EINVAL;
        }
        evtypes[n] = hwc->event_base;
        evts[n] = event;

        if (alpha_check_constraints(evts, evtypes, n + 1))
                return -EINVAL;

        /* Indicate that PMU config and idx are yet to be determined. */
        hwc->config_base = 0;
        hwc->idx = PMC_NO_INDEX;

        event->destroy = hw_perf_event_destroy;

        /*
         * Most architectures reserve the PMU for their use at this point.
         * As there is no existing mechanism to arbitrate usage and there
         * appears to be no other user of the Alpha PMU we just assume
         * that we can just use it, hence a NO-OP here.
         *
         * Maybe an alpha_reserve_pmu() routine should be implemented but is
         * anything else ever going to use it?
         */

        if (!hwc->sample_period) {
                hwc->sample_period = alpha_pmu->pmc_max_period[0];
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        return 0;
}

/*
 * Main entry point to initialise a HW performance event.
 */
static int alpha_pmu_event_init(struct perf_event *event)
{
        int err;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        switch (event->attr.type) {
        case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
                break;

        default:
                return -ENOENT;
        }

        if (!alpha_pmu)
                return -ENODEV;

        /* Do the real initialisation work. */
        err = __hw_perf_event_init(event);

        return err;
}

/*
 * Main entry point - enable HW performance counters.
 */
static void alpha_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (cpuc->enabled)
                return;

        cpuc->enabled = 1;
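        /* Compiler barrier: keep the enabled store ordered before the PMC
         * programming below.
         */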
        barrier();

        if (cpuc->n_events > 0) {
                /* Update cpuc with information from any new scheduled events. */
                maybe_change_configuration(cpuc);

                /* Start counting the desired events. */
                wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
                wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
                wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
        }
}


/*
 * Main entry point - disable HW performance counters.
 */
static void alpha_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (!cpuc->enabled)
                return;

        cpuc->enabled = 0;
        cpuc->n_added = 0;

        wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}

static struct pmu pmu = {
        .pmu_enable     = alpha_pmu_enable,
        .pmu_disable    = alpha_pmu_disable,
        .event_init     = alpha_pmu_event_init,
        .add            = alpha_pmu_add,
        .del            = alpha_pmu_del,
        .start          = alpha_pmu_start,
        .stop           = alpha_pmu_stop,
        .read           = alpha_pmu_read,
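        /* No support for privilege-level filtering, so have the core
         * reject exclude_* requests.
         */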
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
};


/*
 * Main entry point - don't know when this is called but it
 * obviously dumps debug info.
 */
void perf_event_print_debug(void)
{
        unsigned long flags;
        unsigned long pcr;
        int pcr0, pcr1;
        int cpu;

        if (!supported_cpu())
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();

        pcr = wrperfmon(PERFMON_CMD_READ, 0);
        pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
        pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];

        pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);

        local_irq_restore(flags);
}


/*
 * Performance Monitoring Interrupt Service Routine called when a PMC
 * overflows.  The PMC that overflowed is passed in la_ptr.
 */
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
                                        struct pt_regs *regs)
{
        struct cpu_hw_events *cpuc;
        struct perf_sample_data data;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int idx, j;

        __this_cpu_inc(irq_pmi_count);
        cpuc = this_cpu_ptr(&cpu_hw_events);

        /* Completely counting through the PMC's period to trigger a new PMC
         * overflow interrupt while in this interrupt routine is utterly
         * disastrous!  The EV6 and EV67 counters are sufficiently large to
         * prevent this but to be really sure disable the PMCs.
         */
        wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);

        /* la_ptr is the counter that overflowed. */
        if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
                /* This should never occur! */
                irq_err_count++;
                pr_warn("PMI: silly index %ld\n", la_ptr);
                wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
                return;
        }

        idx = la_ptr;

        for (j = 0; j < cpuc->n_events; j++) {
                if (cpuc->current_idx[j] == idx)
                        break;
        }

        if (unlikely(j == cpuc->n_events)) {
                /* This can occur if the event is disabled right on a PMC overflow. */
                wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
                return;
        }

        event = cpuc->event[j];

        if (unlikely(!event)) {
                /* This should never occur! */
                irq_err_count++;
                pr_warn("PMI: No event at index %d!\n", idx);
                wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
                return;
        }

        hwc = &event->hw;
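        /* The counter wrapped, so pass one full period as the overflow
         * correction for the delta calculation.
         */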
        alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
        perf_sample_data_init(&data, 0, hwc->last_period);

        if (alpha_perf_event_set_period(event, hwc, idx)) {
                if (perf_event_overflow(event, &data, regs)) {
                        /* Interrupts coming too quickly; "throttle" the
                         * counter, i.e., disable it for a little while.
                         */
                        alpha_pmu_stop(event, 0);
                }
        }
        wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
}



/*
 * Init call to initialise performance events at kernel startup.
 */
int __init init_hw_perf_events(void)
{
        pr_info("Performance events: ");

        if (!supported_cpu()) {
                pr_cont("No support for your CPU.\n");
                return 0;
        }

        pr_cont("Supported CPU type!\n");

        /* Override performance counter IRQ vector */
        perf_irq = alpha_perf_event_irq_handler;

        /* And set up PMU specification */
        alpha_pmu = &ev67_pmu;

        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

        return 0;
}
early_initcall(init_hw_perf_events);

