Linux/arch/x86/events/msr.c

#include <linux/perf_event.h>
#include <asm/intel-family.h>

enum perf_msr_id {
        PERF_MSR_TSC                    = 0,
        PERF_MSR_APERF                  = 1,
        PERF_MSR_MPERF                  = 2,
        PERF_MSR_PPERF                  = 3,
        PERF_MSR_SMI                    = 4,
        PERF_MSR_PTSC                   = 5,
        PERF_MSR_IRPERF                 = 6,

        PERF_MSR_EVENT_MAX,
};

static bool test_aperfmperf(int idx)
{
        return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx)
{
        return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx)
{
        return boot_cpu_has(X86_FEATURE_IRPERF);
}

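/*
 * SMI_COUNT (and, on Skylake/Kabylake, PPERF) are model-specific Intel
 * Family 6 MSRs; models not listed below are assumed not to have them.
 */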
static bool test_intel(int idx)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
            boot_cpu_data.x86 != 6)
                return false;

        switch (boot_cpu_data.x86_model) {
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_NEHALEM_G:
        case INTEL_FAM6_NEHALEM_EP:
        case INTEL_FAM6_NEHALEM_EX:

        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_WESTMERE_EP:
        case INTEL_FAM6_WESTMERE_EX:

        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_SANDYBRIDGE_X:

        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE_X:

        case INTEL_FAM6_HASWELL_CORE:
        case INTEL_FAM6_HASWELL_X:
        case INTEL_FAM6_HASWELL_ULT:
        case INTEL_FAM6_HASWELL_GT3E:

        case INTEL_FAM6_BROADWELL_CORE:
        case INTEL_FAM6_BROADWELL_XEON_D:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_BROADWELL_X:

        case INTEL_FAM6_ATOM_SILVERMONT1:
        case INTEL_FAM6_ATOM_SILVERMONT2:
        case INTEL_FAM6_ATOM_AIRMONT:
                if (idx == PERF_MSR_SMI)
                        return true;
                break;

        case INTEL_FAM6_SKYLAKE_MOBILE:
        case INTEL_FAM6_SKYLAKE_DESKTOP:
        case INTEL_FAM6_SKYLAKE_X:
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
                        return true;
                break;
        }

        return false;
}

struct perf_msr {
        u64     msr;
        struct  perf_pmu_events_attr *attr;
        bool    (*test)(int idx);
};

PMU_EVENT_ATTR_STRING(tsc,    evattr_tsc,    "event=0x00");
PMU_EVENT_ATTR_STRING(aperf,  evattr_aperf,  "event=0x01");
PMU_EVENT_ATTR_STRING(mperf,  evattr_mperf,  "event=0x02");
PMU_EVENT_ATTR_STRING(pperf,  evattr_pperf,  "event=0x03");
PMU_EVENT_ATTR_STRING(smi,    evattr_smi,    "event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,   evattr_ptsc,   "event=0x05");
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");

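/*
 * Probe table: MSR address, sysfs event attribute and availability test.
 * Entries whose test fails, or whose MSR cannot be read, have their attr
 * cleared in msr_init() and are therefore not exported.
 */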
static struct perf_msr msr[] = {
        [PERF_MSR_TSC]    = { 0,                &evattr_tsc,    NULL,            },
        [PERF_MSR_APERF]  = { MSR_IA32_APERF,   &evattr_aperf,  test_aperfmperf, },
        [PERF_MSR_MPERF]  = { MSR_IA32_MPERF,   &evattr_mperf,  test_aperfmperf, },
        [PERF_MSR_PPERF]  = { MSR_PPERF,        &evattr_pperf,  test_intel,      },
        [PERF_MSR_SMI]    = { MSR_SMI_COUNT,    &evattr_smi,    test_intel,      },
        [PERF_MSR_PTSC]   = { MSR_F15H_PTSC,    &evattr_ptsc,   test_ptsc,       },
        [PERF_MSR_IRPERF] = { MSR_F17H_IRPERF,  &evattr_irperf, test_irperf,     },
};

static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
        NULL,
};

static struct attribute_group events_attr_group = {
        .name = "events",
        .attrs = events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};
static struct attribute_group format_attr_group = {
        .name = "format",
        .attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
        &events_attr_group,
        &format_attr_group,
        NULL,
};

static int msr_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (cfg >= PERF_MSR_EVENT_MAX)
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        if (!msr[cfg].attr)
                return -EINVAL;

        event->hw.idx = -1;
        event->hw.event_base = msr[cfg].msr;
        event->hw.config = cfg;

        return 0;
}

static inline u64 msr_read_counter(struct perf_event *event)
{
        u64 now;

        if (event->hw.event_base)
                rdmsrl(event->hw.event_base, now);
        else
                rdtscll(now);

        return now;
}

static void msr_event_update(struct perf_event *event)
{
        u64 prev, now;
        s64 delta;

        /* Careful, an NMI might modify the previous event value. */
again:
        prev = local64_read(&event->hw.prev_count);
        now = msr_read_counter(event);

        if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
                goto again;

        delta = now - prev;
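        /*
         * MSR_SMI_COUNT is a 32-bit counter; sign-extend the 32-bit delta
         * so that a wrap of the counter does not show up as a huge bogus
         * increment.
         */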
        if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
                delta = sign_extend64(delta, 31);

        local64_add(delta, &event->count);
}

static void msr_event_start(struct perf_event *event, int flags)
{
        u64 now;

        now = msr_read_counter(event);
        local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
        msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
        msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_START)
                msr_event_start(event, flags);

        return 0;
}

static struct pmu pmu_msr = {
        .task_ctx_nr    = perf_sw_context,
        .attr_groups    = attr_groups,
        .event_init     = msr_event_init,
        .add            = msr_event_add,
        .del            = msr_event_del,
        .start          = msr_event_start,
        .stop           = msr_event_stop,
        .read           = msr_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
};

static int __init msr_init(void)
{
        int i, j = 0;

        if (!boot_cpu_has(X86_FEATURE_TSC)) {
                pr_cont("no MSR PMU driver.\n");
                return 0;
        }

        /* Probe the MSRs. */
        for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
                u64 val;

                /*
                 * Under virtualization there is no way to tell whether a
                 * read-only MSR is actually present, so probe it with
                 * rdmsrl_safe().
                 */
                if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
                        msr[i].attr = NULL;
        }

        /* List remaining MSRs in the sysfs attrs. */
        for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
                if (msr[i].attr)
                        events_attrs[j++] = &msr[i].attr->attr.attr;
        }
        events_attrs[j] = NULL;

        perf_pmu_register(&pmu_msr, "msr", -1);

        return 0;
}
device_initcall(msr_init);
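For reference, the sketch below (not part of this file) shows how the "msr" PMU registered by msr_init() is typically consumed from userspace: the dynamic PMU type id is read from /sys/bus/event_source/devices/msr/type and the config value matches the event=0x.. strings exported above (0x01 is aperf). It is a minimal counting-mode example, assuming a kernel that carries this driver; error handling is kept to a minimum.

/* Hypothetical userspace sketch: count APERF via the "msr" perf PMU. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int type, fd;

        /* Dynamic PMUs advertise their type id in sysfs. */
        FILE *f = fopen("/sys/bus/event_source/devices/msr/type", "r");
        if (!f || fscanf(f, "%d", &type) != 1)
                return 1;
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size   = sizeof(attr);
        attr.type   = type;     /* the "msr" PMU */
        attr.config = 0x01;     /* event=0x01 -> aperf, see PMU_EVENT_ATTR_STRING above */

        /* Counting mode only: the PMU sets PERF_PMU_CAP_NO_INTERRUPT, so no sampling. */
        fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;

        sleep(1);
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("aperf: %llu\n", (unsigned long long)count);

        close(fd);
        return 0;
}

The perf tool exercises the same path with "perf stat -e msr/aperf/ -- <cmd>".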
