/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)

struct cpu_hw_events {
        /* Array of events on this cpu. */
        struct perf_event       *events[MIPS_MAX_HWEVENTS];

        /*
         * Set the bit (indexed by the counter number) when the counter
         * is used for an event.
         */
        unsigned long           used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

        /*
         * Software copy of the control register for each performance counter.
         * MIPS CPUs vary in their performance counters; implementations use
         * this field differently, and some may not use it at all.
         */
        unsigned int            saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
        unsigned int event_id;
        /*
         * MIPS performance counters are indexed starting from 0.
         * CNTR_EVEN indicates the indexes of the counters to be used are
         * even numbers.
         */
        unsigned int cntr_mask;
        #define CNTR_EVEN       0x55555555
        #define CNTR_ODD        0xaaaaaaaa
        #define CNTR_ALL        0xffffffff
        enum {
                T  = 0,
                V  = 1,
                P  = 2,
        } range;
};
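
/*
 * The range field distinguishes per-TC (T), per-VPE (V) and
 * processor-wide (P) events, matching the P/V/T ranges that the MIPS MT
 * Software User's Manual assigns to each event; mipsxx_pmu_enable_event()
 * treats anything wider than V as processor-wide.
 */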

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
        u64             max_period;
        u64             valid_count;
        u64             overflow;
        const char      *name;
        int             irq;
        u64             (*read_counter)(unsigned int idx);
        void            (*write_counter)(unsigned int idx, u64 val);
        const struct mips_perf_event *(*map_raw_event)(u64 config);
        const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
        const struct mips_perf_event (*cache_event_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
        unsigned int    num_counters;
};

static struct mips_pmu mipspmu;

#define M_PERFCTL_EVENT(event)          (((event) << MIPS_PERFCTRL_EVENT_S) & \
                                         MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)            ((vpe)    << MIPS_PERFCTRL_VPEID_S)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)         0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter)         (filter)
#endif /* CONFIG_CPU_BMIPS5000 */

#define    M_TC_EN_ALL                  M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
#define    M_TC_EN_VPE                  M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
#define    M_TC_EN_TC                   M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)

#define M_PERFCTL_COUNT_EVENT_WHENEVER  (MIPS_PERFCTRL_EXL |            \
                                         MIPS_PERFCTRL_K |              \
                                         MIPS_PERFCTRL_U |              \
                                         MIPS_PERFCTRL_S |              \
                                         MIPS_PERFCTRL_IE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK           0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK           0x1f
#endif


#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()        (cpu_has_mipsmt_pertccounters ? \
                         0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
#define vpe_id()        (cpu_has_mipsmt_pertccounters ? \
                         0 : cpu_vpe_id(&current_cpu_data))
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
        if (num_possible_cpus() > 1)
                return 1;

        return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
        return counters >> vpe_shift();
}
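
/*
 * When the hardware counters are shared between the TCs of a core (two
 * VPEs here, per MIPS_TCS_PER_COUNTER), each CPU effectively owns half
 * of them, so the total reported by the hardware is halved before it is
 * used as the per-CPU counter count.
 */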

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()        0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
        if (vpe_id() == 1)
                idx = (idx + 2) & 3;
        return idx;
}
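
/*
 * With four shared counters split between two VPEs, VPE 1's logical
 * counters 0 and 1 live in physical counters 2 and 3, hence the rotate
 * by two (modulo four) above.
 */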

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
        idx = mipsxx_pmu_swizzle_perf_idx(idx);

        switch (idx) {
        case 0:
                /*
                 * The counters are unsigned, we must cast to truncate
                 * off the high bits.
                 */
                return (u32)read_c0_perfcntr0();
        case 1:
                return (u32)read_c0_perfcntr1();
        case 2:
                return (u32)read_c0_perfcntr2();
        case 3:
                return (u32)read_c0_perfcntr3();
        default:
                WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
                return 0;
        }
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
        idx = mipsxx_pmu_swizzle_perf_idx(idx);

        switch (idx) {
        case 0:
                return read_c0_perfcntr0_64();
        case 1:
                return read_c0_perfcntr1_64();
        case 2:
                return read_c0_perfcntr2_64();
        case 3:
                return read_c0_perfcntr3_64();
        default:
                WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
                return 0;
        }
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
        idx = mipsxx_pmu_swizzle_perf_idx(idx);

        switch (idx) {
        case 0:
                write_c0_perfcntr0(val);
                return;
        case 1:
                write_c0_perfcntr1(val);
                return;
        case 2:
                write_c0_perfcntr2(val);
                return;
        case 3:
                write_c0_perfcntr3(val);
                return;
        }
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
        idx = mipsxx_pmu_swizzle_perf_idx(idx);

        switch (idx) {
        case 0:
                write_c0_perfcntr0_64(val);
                return;
        case 1:
                write_c0_perfcntr1_64(val);
                return;
        case 2:
                write_c0_perfcntr2_64(val);
                return;
        case 3:
                write_c0_perfcntr3_64(val);
                return;
        }
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
        idx = mipsxx_pmu_swizzle_perf_idx(idx);

        switch (idx) {
        case 0:
                return read_c0_perfctrl0();
        case 1:
                return read_c0_perfctrl1();
        case 2:
                return read_c0_perfctrl2();
        case 3:
                return read_c0_perfctrl3();
        default:
                WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
                return 0;
        }
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
        idx = mipsxx_pmu_swizzle_perf_idx(idx);

        switch (idx) {
        case 0:
                write_c0_perfctrl0(val);
                return;
        case 1:
                write_c0_perfctrl1(val);
                return;
        case 2:
                write_c0_perfctrl2(val);
                return;
        case 3:
                write_c0_perfctrl3(val);
                return;
        }
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
                                    struct hw_perf_event *hwc)
{
        int i;

        /*
         * We only need to care about the counter mask. The range has
         * already been checked.
         */
        unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

        for (i = mipspmu.num_counters - 1; i >= 0; i--) {
                /*
                 * Note that some MIPS perf events can be counted by both
                 * even and odd counters, whereas many others can only be
                 * counted by even _or_ odd counters. This introduces an
                 * issue: when the former kind of event takes the counter
                 * the latter kind of event wants to use, the "counter
                 * allocation" for the latter event will fail. If the two
                 * could be swapped dynamically, both would be satisfied.
                 * But we leave this issue alone for now.
                 */
                if (test_bit(i, &cntr_mask) &&
                        !test_and_set_bit(i, cpuc->used_mask))
                        return i;
        }

        return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
        struct perf_event *event = container_of(evt, struct perf_event, hw);
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        unsigned int range = evt->event_base >> 24;

        WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

        cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
                (evt->config_base & M_PERFCTL_CONFIG_MASK) |
                /* Make sure interrupt enabled. */
                MIPS_PERFCTRL_IE;

        if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
                /* enable the counter for the calling thread */
                cpuc->saved_ctrl[idx] |=
                        (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
        } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
                /* The counter is processor wide. Set it up to count all TCs. */
                pr_debug("Enabling perf counter for all TCs\n");
                cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
        } else {
                unsigned int cpu, ctrl;

                /*
                 * Set up the counter for a particular CPU when event->cpu is
                 * a valid CPU number. Otherwise set up the counter for the CPU
                 * scheduling this thread.
                 */
                cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();

                ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
                ctrl |= M_TC_EN_VPE;
                cpuc->saved_ctrl[idx] |= ctrl;
                pr_debug("Enabling perf counter for CPU%d\n", cpu);
        }
        /*
         * We do not actually let the counter run. Leave it until start().
         */
}

static void mipsxx_pmu_disable_event(int idx)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        unsigned long flags;

        WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

        local_irq_save(flags);
        cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
                ~M_PERFCTL_COUNT_EVENT_WHENEVER;
        mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
        local_irq_restore(flags);
}

static int mipspmu_event_set_period(struct perf_event *event,
                                    struct hw_perf_event *hwc,
                                    int idx)
{
        u64 left = local64_read(&hwc->period_left);
        u64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely((left + period) & (1ULL << 63))) {
                /* left underflowed by more than period. */
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        } else if (unlikely((left + period) <= period)) {
                /* left underflowed by less than period. */
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (left > mipspmu.max_period) {
                left = mipspmu.max_period;
                local64_set(&hwc->period_left, left);
        }

        local64_set(&hwc->prev_count, mipspmu.overflow - left);

        mipspmu.write_counter(idx, mipspmu.overflow - left);

        perf_event_update_userpage(event);

        return ret;
}
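
/*
 * Preloading the counter with (mipspmu.overflow - left) makes the
 * overflow bit come on after another `left' events: e.g. with 32-bit
 * counters (overflow == 1ULL << 31) and left == 1000, the counter
 * starts at 0x7ffffc18 and interrupts once bit 31 is set. (Illustrative
 * numbers only.)
 */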

static void mipspmu_event_update(struct perf_event *event,
                                 struct hw_perf_event *hwc,
                                 int idx)
{
        u64 prev_raw_count, new_raw_count;
        u64 delta;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = mipspmu.read_counter(idx);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                new_raw_count) != prev_raw_count)
                goto again;

        delta = new_raw_count - prev_raw_count;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
}
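
/*
 * The cmpxchg loop makes the read-modify-write of prev_count safe
 * against a concurrent update from the overflow interrupt: if the
 * handler changed prev_count between our read and the cmpxchg, we
 * simply retry with fresh values instead of accounting the same delta
 * twice.
 */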

static void mipspmu_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;

        /* Set the period for the event. */
        mipspmu_event_set_period(event, hwc, hwc->idx);

        /* Enable the event. */
        mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (!(hwc->state & PERF_HES_STOPPED)) {
                /* We are working on a local event. */
                mipsxx_pmu_disable_event(hwc->idx);
                barrier();
                mipspmu_event_update(event, hwc, hwc->idx);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static int mipspmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        perf_pmu_disable(event->pmu);

        /* Look for a free counter for this event. */
        idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then
         * make sure it is disabled.
         */
        event->hw.idx = idx;
        mipsxx_pmu_disable_event(idx);
        cpuc->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                mipspmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

        mipspmu_stop(event, PERF_EF_UPDATE);
        cpuc->events[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);

        perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* Don't read disabled counters! */
        if (hwc->idx < 0)
                return;

        mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
        write_unlock(&pmuint_rwlock);
#endif
        resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers can
 * not be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross CPU calls. on_each_cpu() can help us, but we
 * can not make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If any counter interrupt is raised while
 * we own the write lock, we simply pause local counters on that CPU and
 * spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
        pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
        write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
        int err;

        if (mipspmu.irq >= 0) {
                /* Request my own irq handler. */
                err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
                                  IRQF_PERCPU | IRQF_NOBALANCING |
                                  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
                                  IRQF_SHARED,
                                  "mips_perf_pmu", &mipspmu);
                if (err) {
                        pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
                                mipspmu.irq);
                }
        } else if (cp0_perfcount_irq < 0) {
                /*
                 * We are sharing the irq number with the timer interrupt.
                 */
                save_perf_irq = perf_irq;
                perf_irq = mipsxx_pmu_handle_shared_irq;
                err = 0;
        } else {
                pr_warn("The platform hasn't properly defined its interrupt controller\n");
                err = -ENOENT;
        }

        return err;
}

static void mipspmu_free_irq(void)
{
        if (mipspmu.irq >= 0)
                free_irq(mipspmu.irq, &mipspmu);
        else if (cp0_perfcount_irq < 0)
                perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters, so they
 * have specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events,
                                &pmu_reserve_mutex)) {
                /*
                 * We must not call the destroy function with interrupts
                 * disabled.
                 */
                on_each_cpu(reset_counters,
                        (void *)(long)mipspmu.num_counters, 1);
                mipspmu_free_irq();
                mutex_unlock(&pmu_reserve_mutex);
        }
}

static int mipspmu_event_init(struct perf_event *event)
{
        int err = 0;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        switch (event->attr.type) {
        case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
                break;

        default:
                return -ENOENT;
        }

        if (event->cpu >= 0 && !cpu_online(event->cpu))
                return -ENODEV;

        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmu_reserve_mutex);
                if (atomic_read(&active_events) == 0)
                        err = mipspmu_get_irq();

                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmu_reserve_mutex);
        }

        if (err)
                return err;

        return __hw_perf_event_init(event);
}

static struct pmu pmu = {
        .pmu_enable     = mipspmu_enable,
        .pmu_disable    = mipspmu_disable,
        .event_init     = mipspmu_event_init,
        .add            = mipspmu_add,
        .del            = mipspmu_del,
        .start          = mipspmu_start,
        .stop           = mipspmu_stop,
        .read           = mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
        if (num_possible_cpus() > 1)
                return ((unsigned int)pev->range << 24) |
                        (pev->cntr_mask & 0xffff00) |
                        (pev->event_id & 0xff);
        else
#endif /* CONFIG_MIPS_MT_SMP */
                return ((pev->cntr_mask & 0xffff00) |
                        (pev->event_id & 0xff));
}
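
/*
 * Resulting event_base layout (MT SMP case; the range byte is zero
 * otherwise):
 *
 *   31     24 23           8 7        0
 *  +---------+--------------+----------+
 *  |  range  |  cntr_mask   | event_id |
 *  +---------+--------------+----------+
 *
 * mipsxx_pmu_alloc_counter() and mipsxx_pmu_enable_event() pull the
 * fields back out with (event_base >> 8) & 0xffff, event_base & 0xff
 * and event_base >> 24 respectively.
 */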

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
        if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
                return ERR_PTR(-EOPNOTSUPP);
        return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result;
        const struct mips_perf_event *pev;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return ERR_PTR(-EINVAL);

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return ERR_PTR(-EINVAL);

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return ERR_PTR(-EINVAL);

        pev = &((*mipspmu.cache_event_map)
                                        [cache_type]
                                        [cache_op]
                                        [cache_result]);

        if (pev->cntr_mask == 0)
                return ERR_PTR(-EOPNOTSUPP);

        return pev;
}
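
/*
 * The config value decoded above follows the generic perf ABI for
 * PERF_TYPE_HW_CACHE events:
 *
 *   config = (perf_hw_cache_id) |
 *            (perf_hw_cache_op_id << 8) |
 *            (perf_hw_cache_op_result_id << 16)
 */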

static int validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct cpu_hw_events fake_cpuc;

        memset(&fake_cpuc, 0, sizeof(fake_cpuc));

        if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
                return -EINVAL;

        for_each_sibling_event(sibling, leader) {
                if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
                        return -EINVAL;
        }

        if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
                return -EINVAL;

        return 0;
}
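
/*
 * validate_group() dry-runs the counter allocation against a throwaway
 * cpu_hw_events on the stack: if the leader, its siblings and the new
 * event cannot all be given counters at once, the group can never be
 * scheduled, so it is rejected at init time rather than failing later.
 */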

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
                                    int idx, struct perf_sample_data *data,
                                    struct pt_regs *regs)
{
        struct perf_event *event = cpuc->events[idx];
        struct hw_perf_event *hwc = &event->hw;

        mipspmu_event_update(event, hwc, idx);
        data->period = event->hw.last_period;
        if (!mipspmu_event_set_period(event, hwc, idx))
                return;

        if (perf_event_overflow(event, data, regs))
                mipsxx_pmu_disable_event(idx);
}

static int __n_counters(void)
{
        if (!cpu_has_perf)
                return 0;
        if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
                return 1;
        if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
                return 2;
        if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
                return 3;

        return 4;
}
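
/*
 * Each perfctrl register's M bit says "at least one more counter
 * follows", so walking the control registers until M reads as zero
 * yields the number of implemented counters (capped at four here).
 */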

static int n_counters(void)
{
        int counters;

        switch (current_cpu_type()) {
        case CPU_R10000:
                counters = 2;
                break;

        case CPU_R12000:
        case CPU_R14000:
        case CPU_R16000:
                counters = 4;
                break;

        default:
                counters = __n_counters();
        }

        return counters;
}

static void reset_counters(void *arg)
{
        int counters = (int)(long)arg;

        switch (counters) {
        case 4:
                mipsxx_pmu_write_control(3, 0);
                mipspmu.write_counter(3, 0);
                /* fall through */
        case 3:
                mipsxx_pmu_write_control(2, 0);
                mipspmu.write_counter(2, 0);
                /* fall through */
        case 2:
                mipsxx_pmu_write_control(1, 0);
                mipspmu.write_counter(1, 0);
                /* fall through */
        case 1:
                mipsxx_pmu_write_control(0, 0);
                mipspmu.write_counter(0, 0);
        }
}

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
                                [PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K/proAptiv core has different branch event code. */
static const struct mips_perf_event mipsxxcore_event_map2
                                [PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
        [PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
        /* These only count dcache, not icache */
        [PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
        [PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
        [PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
};

static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
        [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

static const struct mips_perf_event bmips5000_event_map
                                [PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
};

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        /*
         * Like some other architectures (e.g. ARM), the performance
         * counters don't differentiate between read and write
         * accesses/misses, so this isn't strictly correct, but it's the
         * best we can do. Writes and reads get combined.
         */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x0a, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x0a, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x09, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x09, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x09, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x09, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)]      = { 0x14, CNTR_EVEN, T },
                /*
                 * Note that MIPS has only "hit" events countable for
                 * the prefetch operation.
                 */
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x15, CNTR_ODD, P },
                [C(RESULT_MISS)]        = { 0x16, CNTR_EVEN, P },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x15, CNTR_ODD, P },
                [C(RESULT_MISS)]        = { 0x16, CNTR_EVEN, P },
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x06, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x06, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x06, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x06, CNTR_ODD, T },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x05, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x05, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x05, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x05, CNTR_ODD, T },
        },
},
[C(BPU)] = {
        /* Using the same code for *HW_BRANCH* */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x02, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x02, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x02, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x02, CNTR_ODD, T },
        },
},
};

/* 74K/proAptiv core has completely different cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map2
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        /*
         * Like some other architectures (e.g. ARM), the performance
         * counters don't differentiate between read and write
         * accesses/misses, so this isn't strictly correct, but it's the
         * best we can do. Writes and reads get combined.
         */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x17, CNTR_ODD, T },
                [C(RESULT_MISS)]        = { 0x18, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x17, CNTR_ODD, T },
                [C(RESULT_MISS)]        = { 0x18, CNTR_ODD, T },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x06, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x06, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x06, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x06, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)]      = { 0x34, CNTR_EVEN, T },
                /*
                 * Note that MIPS has only "hit" events countable for
                 * the prefetch operation.
                 */
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x1c, CNTR_ODD, P },
                [C(RESULT_MISS)]        = { 0x1d, CNTR_EVEN, P },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x1c, CNTR_ODD, P },
                [C(RESULT_MISS)]        = { 0x1d, CNTR_EVEN, P },
        },
},
/*
 * The 74K core does not have specific DTLB events. The proAptiv core has
 * "speculative" DTLB events which are numbered 0x63 (even/odd) and are
 * not included here. One can use raw events if really needed.
 */
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x04, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x04, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x04, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x04, CNTR_ODD, T },
        },
},
[C(BPU)] = {
        /* Using the same code for *HW_BRANCH* */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x27, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x27, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x27, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 0x27, CNTR_ODD, T },
        },
},
};

static const struct mips_perf_event i6x00_cache_map
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x46, CNTR_EVEN | CNTR_ODD },
                [C(RESULT_MISS)]        = { 0x49, CNTR_EVEN | CNTR_ODD },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x47, CNTR_EVEN | CNTR_ODD },
                [C(RESULT_MISS)]        = { 0x4a, CNTR_EVEN | CNTR_ODD },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x84, CNTR_EVEN | CNTR_ODD },
                [C(RESULT_MISS)]        = { 0x85, CNTR_EVEN | CNTR_ODD },
        },
},
[C(DTLB)] = {
        /* Can't distinguish read & write */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x40, CNTR_EVEN | CNTR_ODD },
                [C(RESULT_MISS)]        = { 0x41, CNTR_EVEN | CNTR_ODD },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x40, CNTR_EVEN | CNTR_ODD },
                [C(RESULT_MISS)]        = { 0x41, CNTR_EVEN | CNTR_ODD },
        },
},
[C(BPU)] = {
        /* Conditional branches / mispredicted */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x15, CNTR_EVEN | CNTR_ODD },
                [C(RESULT_MISS)]        = { 0x16, CNTR_EVEN | CNTR_ODD },
        },
},
};

static const struct mips_perf_event loongson3_cache_map
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        /*
         * Like some other architectures (e.g. ARM), the performance
         * counters don't differentiate between read and write
         * accesses/misses, so this isn't strictly correct, but it's the
         * best we can do. Writes and reads get combined.
         */
        [C(OP_READ)] = {
                [C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
        },
},
[C(BPU)] = {
        /* Using the same code for *HW_BRANCH* */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x02, CNTR_EVEN },
                [C(RESULT_MISS)]        = { 0x02, CNTR_ODD },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x02, CNTR_EVEN },
                [C(RESULT_MISS)]        = { 0x02, CNTR_ODD },
        },
},
};

/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        /*
         * Like some other architectures (e.g. ARM), the performance
         * counters don't differentiate between read and write
         * accesses/misses, so this isn't strictly correct, but it's the
         * best we can do. Writes and reads get combined.
         */
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 12, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 12, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 12, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 12, CNTR_ODD, T },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 10, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 10, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 10, CNTR_EVEN, T },
                [C(RESULT_MISS)]        = { 10, CNTR_ODD, T },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)]      = { 23, CNTR_EVEN, T },
                /*
                 * Note that MIPS has only "hit" events countable for
                 * the prefetch operation.
                 */
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 28, CNTR_EVEN, P },
                [C(RESULT_MISS)]        = { 28, CNTR_ODD, P },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 28, CNTR_EVEN, P },
                [C(RESULT_MISS)]        = { 28, CNTR_ODD, P },
        },
},
[C(BPU)] = {
        /* Using the same code for *HW_BRANCH* */
        [C(OP_READ)] = {
                [C(RESULT_MISS)]        = { 0x02, CNTR_ODD, T },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_MISS)]        = { 0x02, CNTR_ODD, T },
        },
},
};

static const struct mips_perf_event octeon_cache_map
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x2b, CNTR_ALL },
                [C(RESULT_MISS)]        = { 0x2e, CNTR_ALL },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x30, CNTR_ALL },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x18, CNTR_ALL },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)]      = { 0x19, CNTR_ALL },
        },
},
[C(DTLB)] = {
        /*
         * Only general DTLB misses are counted, so use the same event
         * for read and write.
         */
        [C(OP_READ)] = {
                [C(RESULT_MISS)]        = { 0x35, CNTR_ALL },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_MISS)]        = { 0x35, CNTR_ALL },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_MISS)]        = { 0x37, CNTR_ALL },
        },
},
};

static const struct mips_perf_event xlp_cache_map
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
                [C(RESULT_MISS)]        = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
                [C(RESULT_MISS)]        = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
                [C(RESULT_MISS)]        = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
                [C(RESULT_MISS)]        = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)]      = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
                [C(RESULT_MISS)]        = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
        },
},
[C(DTLB)] = {
        /*
         * Only general DTLB misses are counted, so use the same event
         * for read and write.
         */
        [C(OP_READ)] = {
                [C(RESULT_MISS)]        = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_MISS)]        = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_MISS)]        = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_MISS)]        = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_MISS)]        = { 0x25, CNTR_ALL },
        },
},
};

static int __hw_perf_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        const struct mips_perf_event *pev;
        int err;

        /* Return the MIPS event descriptor for a generic perf event. */
        if (PERF_TYPE_HARDWARE == event->attr.type) {
                if (event->attr.config >= PERF_COUNT_HW_MAX)
                        return -EINVAL;
                pev = mipspmu_map_general_event(event->attr.config);
        } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
                pev = mipspmu_map_cache_event(event->attr.config);
        } else if (PERF_TYPE_RAW == event->attr.type) {
                /* We are working on the global raw event. */
                mutex_lock(&raw_event_mutex);
                pev = mipspmu.map_raw_event(event->attr.config);
        } else {
                /* The event type is not (yet) supported. */
                return -EOPNOTSUPP;
        }

        if (IS_ERR(pev)) {
                if (PERF_TYPE_RAW == event->attr.type)
                        mutex_unlock(&raw_event_mutex);
                return PTR_ERR(pev);
        }

        /*
         * We allow maximum flexibility in how each individual counter
         * shared by the single CPU operates (the mode exclusion and the
         * range).
         */
        hwc->config_base = MIPS_PERFCTRL_IE;

        hwc->event_base = mipspmu_perf_event_encode(pev);
        if (PERF_TYPE_RAW == event->attr.type)
                mutex_unlock(&raw_event_mutex);

        if (!attr->exclude_user)
                hwc->config_base |= MIPS_PERFCTRL_U;
        if (!attr->exclude_kernel) {
                hwc->config_base |= MIPS_PERFCTRL_K;
                /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
                hwc->config_base |= MIPS_PERFCTRL_EXL;
        }
        if (!attr->exclude_hv)
                hwc->config_base |= MIPS_PERFCTRL_S;

        hwc->config_base &= M_PERFCTL_CONFIG_MASK;
        /*
         * The event can belong to another cpu. We do not assign a local
         * counter for it for now.
         */
        hwc->idx = -1;
        hwc->config = 0;

        if (!hwc->sample_period) {
                hwc->sample_period  = mipspmu.max_period;
                hwc->last_period    = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        err = 0;
        if (event->group_leader != event)
                err = validate_group(event);

        event->destroy = hw_perf_event_destroy;

        if (err)
                event->destroy(event);

        return err;
}
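
/*
 * For raw events, map_raw_event() fills in the shared static raw_event
 * and returns a pointer to it, so raw_event_mutex has to stay held from
 * the mapping until mipspmu_perf_event_encode() has copied the fields
 * out above.
 */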

static void pause_local_counters(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int ctr = mipspmu.num_counters;
        unsigned long flags;

        local_irq_save(flags);
        do {
                ctr--;
                cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
                mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
                                         ~M_PERFCTL_COUNT_EVENT_WHENEVER);
        } while (ctr > 0);
        local_irq_restore(flags);
}

static void resume_local_counters(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int ctr = mipspmu.num_counters;

        do {
                ctr--;
                mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
        } while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_sample_data data;
        unsigned int counters = mipspmu.num_counters;
        u64 counter;
        int handled = IRQ_NONE;
        struct pt_regs *regs;

        if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
                return handled;
        /*
         * First we pause the local counters, so that when we are locked
         * here, the counters are all paused. When the write lock is taken
         * in mipspmu_disable(), the timer interrupt handler will be
         * delayed.
         *
         * See also mipspmu_start().
         */
        pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
        read_lock(&pmuint_rwlock);
#endif

        regs = get_irq_regs();

        perf_sample_data_init(&data, 0, 0);

        switch (counters) {
#define HANDLE_COUNTER(n)                                               \
        case n + 1:                                                     \
                if (test_bit(n, cpuc->used_mask)) {                     \
                        counter = mipspmu.read_counter(n);              \
                        if (counter & mipspmu.overflow) {               \
                                handle_associated_event(cpuc, n, &data, regs); \
                                handled = IRQ_HANDLED;                  \
                        }                                               \
                }
        HANDLE_COUNTER(3)
        HANDLE_COUNTER(2)
        HANDLE_COUNTER(1)
        HANDLE_COUNTER(0)
        }

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
        read_unlock(&pmuint_rwlock);
#endif
        resume_local_counters();

        /*
         * Do all the work for the pending perf events. We can do this
         * in here because the performance counter interrupt is a regular
         * interrupt, not an NMI.
         */
        if (handled == IRQ_HANDLED)
                irq_work_run();

        return handled;
}
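
/*
 * Two details of the handler worth noting: the CAUSEF_PCI check lets
 * cores that implement the Cause.PCI bit cheaply dismiss interrupts
 * that are not ours (the line may be shared with the timer), and the
 * HANDLE_COUNTER() cases deliberately fall through so that e.g. a
 * four-counter CPU checks counters 3, 2, 1 and 0 in turn.
 */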

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
        return mipsxx_pmu_handle_shared_irq();
}
1443 
1444 /* 24K */
1445 #define IS_BOTH_COUNTERS_24K_EVENT(b)                                   \
1446         ((b) == 0 || (b) == 1 || (b) == 11)
1447 
1448 /* 34K */
1449 #define IS_BOTH_COUNTERS_34K_EVENT(b)                                   \
1450         ((b) == 0 || (b) == 1 || (b) == 11)
1451 #ifdef CONFIG_MIPS_MT_SMP
1452 #define IS_RANGE_P_34K_EVENT(r, b)                                      \
1453         ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||             \
1454          (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||           \
1455          (r) == 176 || ((b) >= 50 && (b) <= 55) ||                      \
1456          ((b) >= 64 && (b) <= 67))
1457 #define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1458 #endif
1459 
1460 /* 74K */
1461 #define IS_BOTH_COUNTERS_74K_EVENT(b)                                   \
1462         ((b) == 0 || (b) == 1)
1463 
1464 /* proAptiv */
1465 #define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)                              \
1466         ((b) == 0 || (b) == 1)
1467 /* P5600 */
1468 #define IS_BOTH_COUNTERS_P5600_EVENT(b)                                 \
1469         ((b) == 0 || (b) == 1)
1470 
1471 /* 1004K */
1472 #define IS_BOTH_COUNTERS_1004K_EVENT(b)                                 \
1473         ((b) == 0 || (b) == 1 || (b) == 11)
1474 #ifdef CONFIG_MIPS_MT_SMP
1475 #define IS_RANGE_P_1004K_EVENT(r, b)                                    \
1476         ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||             \
1477          (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||            \
1478          (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||        \
1479          (r) == 188 || (b) == 61 || (b) == 62 ||                        \
1480          ((b) >= 64 && (b) <= 67))
1481 #define IS_RANGE_V_1004K_EVENT(r)       ((r) == 47)
1482 #endif
1483 
1484 /* interAptiv */
1485 #define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)                            \
1486         ((b) == 0 || (b) == 1 || (b) == 11)
1487 #ifdef CONFIG_MIPS_MT_SMP
1488 /* The P/V/T info is not provided for "(b) == 38" in the SUM (Software User's Manual); assume P. */
1489 #define IS_RANGE_P_INTERAPTIV_EVENT(r, b)                               \
1490         ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||             \
1491          (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||            \
1492          (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&         \
1493          (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||          \
1494          ((b) >= 64 && (b) <= 67))
1495 #define IS_RANGE_V_INTERAPTIV_EVENT(r)  ((r) == 47 || (r) == 175)
1496 #endif
1497 
1498 /* BMIPS5000 */
1499 #define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)                             \
1500         ((b) == 0 || (b) == 1)
1501 
1502 
1503 /*
1504  * For most cores the user can use raw events 0-255, where 0-127 select
1505  * events on the even counters and 128-255 select events on the odd
1506  * counters; bit 7 acts as the even/odd bank selector. For example, to
1507  * count event number 15 on the odd counters (as numbered in the core's
1508  * user manual), add 128 to 15 and pass the result, 143 (0x8F), as the
1509  * raw event config.
1510  *
1511  * Some newer cores have more events and accept raw events 0-511, where
1512  * 0-255 select events on the even counters and 256-511 select events on
1513  * the odd counters; there bit 8 is the even/odd bank selector.
1514  */
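/*
 * Usage sketch (illustrative, not part of the original source): the
 * perf tool passes raw events as "r" followed by the hex config, so
 * odd-counter event 15 on a core with 7-bit event numbers would be
 * requested as:
 *
 *      perf stat -e r8f -- ./workload
 *
 * The config 0x8f then reaches mipsxx_pmu_map_raw_event() below as
 * raw_id == 0x8f and base_id == 0x0f, selecting CNTR_ODD.
 */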
1515 static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1516 {
1517         /* currently most cores have 7-bit event numbers */
1518         unsigned int raw_id = config & 0xff;
1519         unsigned int base_id = raw_id & 0x7f;
1520 
1521         switch (current_cpu_type()) {
1522         case CPU_24K:
1523                 if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1524                         raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1525                 else
1526                         raw_event.cntr_mask =
1527                                 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1528 #ifdef CONFIG_MIPS_MT_SMP
1529                 /*
1530                  * This assignment has no effect: non-multithreading
1531                  * CPUs never check or use the range.
1532                  */
1533                 raw_event.range = P;
1534 #endif
1535                 break;
1536         case CPU_34K:
1537                 if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1538                         raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1539                 else
1540                         raw_event.cntr_mask =
1541                                 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1542 #ifdef CONFIG_MIPS_MT_SMP
1543                 if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1544                         raw_event.range = P;
1545                 else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1546                         raw_event.range = V;
1547                 else
1548                         raw_event.range = T;
1549 #endif
1550                 break;
1551         case CPU_74K:
1552         case CPU_1074K:
1553                 if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1554                         raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1555                 else
1556                         raw_event.cntr_mask =
1557                                 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1558 #ifdef CONFIG_MIPS_MT_SMP
1559                 raw_event.range = P;
1560 #endif
1561                 break;
1562         case CPU_PROAPTIV:
1563                 if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
1564                         raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1565                 else
1566                         raw_event.cntr_mask =
1567                                 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1568 #ifdef CONFIG_MIPS_MT_SMP
1569                 raw_event.range = P;
1570 #endif
1571                 break;
1572         case CPU_P5600:
1573         case CPU_P6600:
1574                 /* 8-bit event numbers */
1575                 raw_id = config & 0x1ff;
1576                 base_id = raw_id & 0xff;
1577                 if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
1578                         raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1579                 else
1580                         raw_event.cntr_mask =
1581                                 raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
1582 #ifdef CONFIG_MIPS_MT_SMP
1583                 raw_event.range = P;
1584 #endif
1585                 break;
1586         case CPU_I6400:
1587         case CPU_I6500:
1588                 /* 8-bit event numbers */
1589                 base_id = config & 0xff;
1590                 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1591                 break;
1592         case CPU_1004K:
1593                 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1594                         raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1595                 else
1596                         raw_event.cntr_mask =
1597                                 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1598 #ifdef CONFIG_MIPS_MT_SMP
1599                 if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1600                         raw_event.range = P;
1601                 else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1602                         raw_event.range = V;
1603                 else
1604                         raw_event.range = T;
1605 #endif
1606                 break;
1607         case CPU_INTERAPTIV:
1608                 if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
1609                         raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1610                 else
1611                         raw_event.cntr_mask =
1612                                 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1613 #ifdef CONFIG_MIPS_MT_SMP
1614                 if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
1615                         raw_event.range = P;
1616                 else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
1617                         raw_event.range = V;
1618                 else
1619                         raw_event.range = T;
1620 #endif
1621                 break;
1622         case CPU_BMIPS5000:
1623                 if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1624                         raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1625                 else
1626                         raw_event.cntr_mask =
1627                                 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1628                 break;
1629         case CPU_LOONGSON3:
1630                 raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1631                 break;
1632         }
1633 
1634         raw_event.event_id = base_id;
1635 
1636         return &raw_event;
1637 }
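/*
 * Decode sketch (editorial) for a 24K-class core with 7-bit event
 * numbers, following the logic above:
 *
 *      config  raw_id  base_id  cntr_mask
 *      0x0b    0x0b    0x0b     CNTR_EVEN | CNTR_ODD  (event 11, both banks)
 *      0x0f    0x0f    0x0f     CNTR_EVEN             (event 15, even bank)
 *      0x8f    0x8f    0x0f     CNTR_ODD              (event 15, odd bank)
 */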
1638 
1639 static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1640 {
1641         unsigned int raw_id = config & 0xff;
1642         unsigned int base_id = raw_id & 0x7f;
1643 
1644 
1645         raw_event.cntr_mask = CNTR_ALL;
1646         raw_event.event_id = base_id;
1647 
1648         if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
1649                 if (base_id > 0x42)
1650                         return ERR_PTR(-EOPNOTSUPP);
1651         } else {
1652                 if (base_id > 0x3a)
1653                         return ERR_PTR(-EOPNOTSUPP);
1654         }
1655 
1656         switch (base_id) {
1657         case 0x00:
1658         case 0x0f:
1659         case 0x1e:
1660         case 0x1f:
1661         case 0x2f:
1662         case 0x34:
1663         case 0x3b ... 0x3f:
1664                 return ERR_PTR(-EOPNOTSUPP);
1665         default:
1666                 break;
1667         }
1668 
1669         return &raw_event;
1670 }
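/*
 * Editorial note: the checks above first cap the event number (0x3a
 * for OCTEON/OCTEON+, 0x42 for OCTEON2) and then reject individually
 * reserved encodings, so e.g. config 0x1e fails with -EOPNOTSUPP
 * while config 0x01 is accepted and may run on any counter (CNTR_ALL).
 */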
1671 
1672 static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
1673 {
1674         unsigned int raw_id = config & 0xff;
1675 
1676         /* Only 1-63 are defined */
1677         if ((raw_id < 0x01) || (raw_id > 0x3f))
1678                 return ERR_PTR(-EOPNOTSUPP);
1679 
1680         raw_event.cntr_mask = CNTR_ALL;
1681         raw_event.event_id = raw_id;
1682 
1683         return &raw_event;
1684 }
1685 
1686 static int __init
1687 init_hw_perf_events(void)
1688 {
1689         int counters, irq;
1690         int counter_bits;
1691 
1692         pr_info("Performance counters: ");
1693 
1694         counters = n_counters();
1695         if (counters == 0) {
1696                 pr_cont("No available PMU.\n");
1697                 return -ENODEV;
1698         }
1699 
1700 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1701         if (!cpu_has_mipsmt_pertccounters)
1702                 counters = counters_total_to_per_cpu(counters);
1703 #endif
1704 
1705         if (get_c0_perfcount_int)
1706                 irq = get_c0_perfcount_int();
1707         else if (cp0_perfcount_irq >= 0)
1708                 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1709         else
1710                 irq = -1;
1711 
1712         mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1713 
1714         switch (current_cpu_type()) {
1715         case CPU_24K:
1716                 mipspmu.name = "mips/24K";
1717                 mipspmu.general_event_map = &mipsxxcore_event_map;
1718                 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1719                 break;
1720         case CPU_34K:
1721                 mipspmu.name = "mips/34K";
1722                 mipspmu.general_event_map = &mipsxxcore_event_map;
1723                 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1724                 break;
1725         case CPU_74K:
1726                 mipspmu.name = "mips/74K";
1727                 mipspmu.general_event_map = &mipsxxcore_event_map2;
1728                 mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1729                 break;
1730         case CPU_PROAPTIV:
1731                 mipspmu.name = "mips/proAptiv";
1732                 mipspmu.general_event_map = &mipsxxcore_event_map2;
1733                 mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1734                 break;
1735         case CPU_P5600:
1736                 mipspmu.name = "mips/P5600";
1737                 mipspmu.general_event_map = &mipsxxcore_event_map2;
1738                 mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1739                 break;
1740         case CPU_P6600:
1741                 mipspmu.name = "mips/P6600";
1742                 mipspmu.general_event_map = &mipsxxcore_event_map2;
1743                 mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1744                 break;
1745         case CPU_I6400:
1746                 mipspmu.name = "mips/I6400";
1747                 mipspmu.general_event_map = &i6x00_event_map;
1748                 mipspmu.cache_event_map = &i6x00_cache_map;
1749                 break;
1750         case CPU_I6500:
1751                 mipspmu.name = "mips/I6500";
1752                 mipspmu.general_event_map = &i6x00_event_map;
1753                 mipspmu.cache_event_map = &i6x00_cache_map;
1754                 break;
1755         case CPU_1004K:
1756                 mipspmu.name = "mips/1004K";
1757                 mipspmu.general_event_map = &mipsxxcore_event_map;
1758                 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1759                 break;
1760         case CPU_1074K:
1761                 mipspmu.name = "mips/1074K";
1762                 mipspmu.general_event_map = &mipsxxcore_event_map;
1763                 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1764                 break;
1765         case CPU_INTERAPTIV:
1766                 mipspmu.name = "mips/interAptiv";
1767                 mipspmu.general_event_map = &mipsxxcore_event_map;
1768                 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1769                 break;
1770         case CPU_LOONGSON1:
1771                 mipspmu.name = "mips/loongson1";
1772                 mipspmu.general_event_map = &mipsxxcore_event_map;
1773                 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1774                 break;
1775         case CPU_LOONGSON3:
1776                 mipspmu.name = "mips/loongson3";
1777                 mipspmu.general_event_map = &loongson3_event_map;
1778                 mipspmu.cache_event_map = &loongson3_cache_map;
1779                 break;
1780         case CPU_CAVIUM_OCTEON:
1781         case CPU_CAVIUM_OCTEON_PLUS:
1782         case CPU_CAVIUM_OCTEON2:
1783                 mipspmu.name = "octeon";
1784                 mipspmu.general_event_map = &octeon_event_map;
1785                 mipspmu.cache_event_map = &octeon_cache_map;
1786                 mipspmu.map_raw_event = octeon_pmu_map_raw_event;
1787                 break;
1788         case CPU_BMIPS5000:
1789                 mipspmu.name = "BMIPS5000";
1790                 mipspmu.general_event_map = &bmips5000_event_map;
1791                 mipspmu.cache_event_map = &bmips5000_cache_map;
1792                 break;
1793         case CPU_XLP:
1794                 mipspmu.name = "xlp";
1795                 mipspmu.general_event_map = &xlp_event_map;
1796                 mipspmu.cache_event_map = &xlp_cache_map;
1797                 mipspmu.map_raw_event = xlp_pmu_map_raw_event;
1798                 break;
1799         default:
1800                 pr_cont("Either hardware does not support performance "
1801                         "counters, or support is not yet implemented.\n");
1802                 return -ENODEV;
1803         }
1804 
1805         mipspmu.num_counters = counters;
1806         mipspmu.irq = irq;
1807 
1808         if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
1809                 mipspmu.max_period = (1ULL << 63) - 1;
1810                 mipspmu.valid_count = (1ULL << 63) - 1;
1811                 mipspmu.overflow = 1ULL << 63;
1812                 mipspmu.read_counter = mipsxx_pmu_read_counter_64;
1813                 mipspmu.write_counter = mipsxx_pmu_write_counter_64;
1814                 counter_bits = 64;
1815         } else {
1816                 mipspmu.max_period = (1ULL << 31) - 1;
1817                 mipspmu.valid_count = (1ULL << 31) - 1;
1818                 mipspmu.overflow = 1ULL << 31;
1819                 mipspmu.read_counter = mipsxx_pmu_read_counter;
1820                 mipspmu.write_counter = mipsxx_pmu_write_counter;
1821                 counter_bits = 32;
1822         }
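        /*
         * Editorial sketch: the overflow bit is the counter's top bit,
         * so the two cases above give:
         *
         *      32-bit: max_period = 0x7fffffff,         overflow = 1ULL << 31
         *      64-bit: max_period = 0x7fffffffffffffff, overflow = 1ULL << 63
         *
         * A read_counter() value with this bit set is exactly what
         * mipsxx_pmu_handle_shared_irq() treats as an overflow via
         * "counter & mipspmu.overflow".
         */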
1823 
1824         on_each_cpu(reset_counters, (void *)(long)counters, 1);
1825 
1826         pr_cont("%s PMU enabled, %d %d-bit counters available to each "
1827                 "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
1828                 irq < 0 ? " (shared with timer interrupt)" : "");
1829 
1830         perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1831 
1832         return 0;
1833 }
1834 early_initcall(init_hw_perf_events);
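/*
 * Example boot output (illustrative values only): on success the
 * pr_info()/pr_cont() calls above combine into a single line such as
 *
 *      Performance counters: mips/74K PMU enabled, 4 32-bit counters
 *      available to each CPU, irq 20
 *
 * and when irq < 0 the line instead ends with
 * "irq -1 (shared with timer interrupt)".
 */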
1835 
