TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/msr.h

#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
        union {
                struct {
                        u32 l;
                        u32 h;
                };
                u64 q;
        };
};
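
/*
 * Illustrative sketch: the anonymous union lets one MSR value be viewed
 * either as two 32-bit halves or as a single 64-bit quantity; both
 * views alias the same storage. msr_example_combine() is a hypothetical
 * helper, shown only to demonstrate the layout.
 */
static inline u64 msr_example_combine(u32 low, u32 high)
{
        struct msr m;

        m.l = low;
        m.h = high;
        return m.q;     /* same bits as ((u64)high << 32) | low */
}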

struct msr_info {
        u32 msr_no;
        struct msr reg;
        struct msr *msrs;
        int err;
};

struct msr_regs_info {
        u32 *regs;
        int err;
};

struct saved_msr {
        bool valid;
        struct msr_info info;
};

struct saved_msrs {
        unsigned int num;
        struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint means different things on the two. On i386, "A" means
 * exactly the edx:eax pair, while on x86_64 it does not mean rdx:rax or
 * edx:eax; instead, it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)    unsigned long low, high
#define EAX_EDX_VAL(val, low, high)     ((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)     "=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)    unsigned long long val
#define EAX_EDX_VAL(val, low, high)     (val)
#define EAX_EDX_RET(val, low, high)     "=A" (val)
#endif
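
/*
 * Illustrative expansion of the macros above, on x86_64:
 *
 *      DECLARE_ARGS(val, low, high)    -> unsigned long low, high
 *      EAX_EDX_RET(val, low, high)     -> "=a" (low), "=d" (high)
 *      EAX_EDX_VAL(val, low, high)     -> ((low) | (high) << 32)
 *
 * so the rdmsr/rdtsc wrappers below receive eax and edx in two 64-bit
 * variables and merge them; on i386 the "A" constraint captures the
 * edx:eax pair in one unsigned long long and EAX_EDX_VAL is a no-op.
 */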

#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif

static inline unsigned long long native_read_msr(unsigned int msr)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("1: rdmsr\n"
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
                     : EAX_EDX_RET(val, low, high) : "c" (msr));
        if (msr_tracepoint_active(__tracepoint_read_msr))
                do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
        return EAX_EDX_VAL(val, low, high);
}
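
/*
 * Illustrative sketch: the non-safe read is meant for MSRs known to be
 * present; a faulting rdmsr is only caught by the ex_handler_rdmsr_unsafe()
 * fixup (which warns and yields zero) rather than reported to the
 * caller. msr_example_read_efer() is a hypothetical helper; MSR_EFER
 * comes from msr-index.h.
 */
static inline u64 msr_example_read_efer(void)
{
        return native_read_msr(MSR_EFER);
}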

static inline unsigned long long native_read_msr_safe(unsigned int msr,
                                                      int *err)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("2: rdmsr ; xor %[err],%[err]\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3: mov %[fault],%[err]\n\t"
                     "xorl %%eax, %%eax\n\t"
                     "xorl %%edx, %%edx\n\t"
                     "jmp 1b\n\t"
                     ".previous\n\t"
                     _ASM_EXTABLE(2b, 3b)
                     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
                     : "c" (msr), [fault] "i" (-EIO));
        if (msr_tracepoint_active(__tracepoint_read_msr))
                do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
        return EAX_EDX_VAL(val, low, high);
}
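
/*
 * Illustrative sketch: the fixup section above converts a faulting
 * rdmsr into *err = -EIO with a zeroed result, so a caller can probe an
 * MSR that may be missing on the running CPU.
 * msr_example_read_or_zero() is a hypothetical helper.
 */
static inline u64 msr_example_read_or_zero(unsigned int msr)
{
        int err;
        u64 val = native_read_msr_safe(msr, &err);

        return err ? 0 : val;
}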

/* Can be uninlined because referenced by paravirt */
static inline void notrace
__native_write_msr_notrace(unsigned int msr, u32 low, u32 high)
{
        asm volatile("1: wrmsr\n"
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
                     : : "c" (msr), "a"(low), "d" (high) : "memory");
}

/* Can be uninlined because referenced by paravirt */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
        __native_write_msr_notrace(msr, low, high);
        if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

static inline void
wrmsr_notrace(unsigned int msr, u32 low, u32 high)
{
        __native_write_msr_notrace(msr, low, high);
}

/* Can be uninlined because referenced by paravirt */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
        int err;

        asm volatile("2: wrmsr ; xor %[err],%[err]\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3:  mov %[fault],%[err] ; jmp 1b\n\t"
                     ".previous\n\t"
                     _ASM_EXTABLE(2b, 3b)
                     : [err] "=a" (err)
                     : "c" (msr), "0" (low), "d" (high),
                       [fault] "i" (-EIO)
                     : "memory");
        if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), err);
        return err;
}
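
/*
 * Illustrative sketch: the safe write returns 0 on success and -EIO if
 * the wrmsr faulted, so a caller can test writability.
 * msr_example_can_write() is a hypothetical helper.
 */
static inline bool msr_example_can_write(unsigned int msr, u64 val)
{
        return native_write_msr_safe(msr, (u32)val, (u32)(val >> 32)) == 0;
}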

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

        return EAX_EDX_VAL(val, low, high);
}
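
/*
 * Illustrative sketch: with no fencing, the CPU may execute the two
 * RDTSCs speculatively and out of order relative to nearby work, so the
 * delta below is only a rough cycle count. msr_example_tsc_delta() is a
 * hypothetical helper.
 */
static inline unsigned long long msr_example_tsc_delta(void)
{
        unsigned long long t0 = rdtsc();
        unsigned long long t1 = rdtsc();

        return t1 - t0;
}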

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
        /*
         * The RDTSC instruction is not ordered relative to memory
         * access.  The Intel SDM and the AMD APM are both vague on this
         * point, but empirically an RDTSC instruction can be
         * speculatively executed before prior loads.  An RDTSC
         * immediately after an appropriate barrier appears to be
         * ordered as a normal load, that is, it provides the same
         * ordering guarantees as reading from a global memory location
         * that some other imaginary CPU is updating continuously with a
         * time stamp.
         */
        alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
                          "lfence", X86_FEATURE_LFENCE_RDTSC);
        return rdtsc();
}
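
/*
 * Illustrative sketch: because the fence keeps RDTSC from starting
 * before prior instructions complete, rdtsc_ordered() is the variant to
 * use when timing a code region. msr_example_time_region() is a
 * hypothetical helper.
 */
static inline unsigned long long msr_example_time_region(void (*fn)(void))
{
        unsigned long long t0 = rdtsc_ordered();

        fn();
        return rdtsc_ordered() - t0;
}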

/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)    do { (now) = rdtsc_ordered(); } while (0)

static inline unsigned long long native_read_pmc(int counter)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
        if (msr_tracepoint_active(__tracepoint_rdpmc))
                do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
        return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)                                   \
do {                                                            \
        u64 __val = native_read_msr((msr));                     \
        (void)((low) = (u32)__val);                             \
        (void)((high) = (u32)(__val >> 32));                    \
} while (0)
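
/*
 * Illustrative sketch: rdmsr() is a statement-like macro that assigns
 * straight into the two lvalues, so no address-of is needed.
 * msr_example_rdmsr_pair() is a hypothetical helper;
 * MSR_IA32_SYSENTER_CS comes from msr-index.h.
 */
static inline u64 msr_example_rdmsr_pair(void)
{
        u32 lo, hi;

        rdmsr(MSR_IA32_SYSENTER_CS, lo, hi);
        return ((u64)hi << 32) | lo;
}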

static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
        native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)                        \
        ((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned int msr, u64 val)
{
        native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
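
/*
 * Illustrative sketch: wrmsrl() hides the low/high split, which is the
 * convenient form for 64-bit values. msr_example_set_tsc_aux() is a
 * hypothetical helper; MSR_TSC_AUX comes from msr-index.h.
 */
static inline void msr_example_set_tsc_aux(u32 id)
{
        wrmsrl(MSR_TSC_AUX, id);        /* high 32 bits end up zero */
}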

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
        return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)                              \
({                                                              \
        int __err;                                              \
        u64 __val = native_read_msr_safe((msr), &__err);        \
        (*low) = (u32)__val;                                    \
        (*high) = (u32)(__val >> 32);                           \
        __err;                                                  \
})
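
/*
 * Illustrative sketch: unlike rdmsr(), rdmsr_safe() takes pointers and
 * evaluates to an error code, so it can probe an MSR that may not
 * exist. msr_example_probe() is a hypothetical helper.
 */
static inline bool msr_example_probe(unsigned int msr)
{
        u32 lo, hi;

        return rdmsr_safe(msr, &lo, &hi) == 0;
}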

static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
        int err;

        *p = native_read_msr_safe(msr, &err);
        return err;
}

#define rdpmc(counter, low, high)                       \
do {                                                    \
        u64 _l = native_read_pmc((counter));            \
        (low)  = (u32)_l;                               \
        (high) = (u32)(_l >> 32);                       \
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif  /* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
        return wrmsr_safe(msr, (u32)val,  (u32)(val >> 32));
}

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
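
/*
 * Illustrative sketch: in the kernel's lib/msr.c these helpers are
 * documented to return a negative errno on failure, 0 if the bit
 * already had the requested value, and > 0 if it was changed.
 * msr_example_enable_lbr() is a hypothetical helper; bit 0 of
 * MSR_IA32_DEBUGCTLMSR is the last-branch-record enable bit.
 */
static inline int msr_example_enable_lbr(void)
{
        return msr_set_bit(MSR_IA32_DEBUGCTLMSR, 0);
}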

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
        rdmsr(msr_no, *l, *h);
        return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        wrmsr(msr_no, l, h);
        return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
        rdmsrl(msr_no, *q);
        return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
        wrmsrl(msr_no, q);
        return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                struct msr *msrs)
{
        rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                struct msr *msrs)
{
        wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
                                    u32 *l, u32 *h)
{
        return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
        return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
        return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
        return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
        return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
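
/*
 * Illustrative sketch: on SMP the *_on_cpu() helpers run the MSR access
 * on the target CPU (the kernel implements them in
 * arch/x86/lib/msr-smp.c), while the UP stubs above just access the
 * local CPU. msr_example_remote_tsc_aux() is a hypothetical helper.
 */
static inline int msr_example_remote_tsc_aux(unsigned int cpu, u64 *val)
{
        return rdmsrl_safe_on_cpu(cpu, MSR_TSC_AUX, val);
}
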
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */
