
TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/processor.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand,
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this, we disable IP header alignment in network drivers.
 */
#define NET_IP_ALIGN    0
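
/*
 * A minimal usage sketch (illustrative, not compiled into this header):
 * drivers reserve NET_IP_ALIGN bytes of headroom so the IP header of a
 * received frame lands on an aligned boundary; with the value 0 above,
 * this is a no-op on x86.
 */
#if 0
static struct sk_buff *example_rx_alloc(struct net_device *dev,
                                        unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

        if (skb)
                skb_reserve(skb, NET_IP_ALIGN); /* align the IP header */
        return skb;
}
#endif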

#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN             (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN        (1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN             __alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN        0
#endif

enum tlb_infos {
        ENTRIES,
        NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head_32.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
        __u8                    x86;            /* CPU family */
        __u8                    x86_vendor;     /* CPU vendor */
        __u8                    x86_model;
        __u8                    x86_stepping;
#ifdef CONFIG_X86_64
        /* Number of 4K pages in DTLB/ITLB combined: */
        int                     x86_tlbsize;
#endif
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
        __u32                   vmx_capability[NVMXINTS];
#endif
        __u8                    x86_virt_bits;
        __u8                    x86_phys_bits;
        /* CPUID returned core id bits: */
        __u8                    x86_coreid_bits;
        __u8                    cu_id;
        /* Max extended CPUID function supported: */
        __u32                   extended_cpuid_level;
        /* Maximum supported CPUID level, -1=no CPUID: */
        int                     cpuid_level;
        /*
         * Align to size of unsigned long because the x86_capability array
         * is passed to bitops, which require the alignment. Use an unnamed
         * union to enforce that the array is aligned to the size of
         * unsigned long.
         */
        union {
                __u32           x86_capability[NCAPINTS + NBUGINTS];
                unsigned long   x86_capability_alignment;
        };
        char                    x86_vendor_id[16];
        char                    x86_model_id[64];
        /* in KB - valid for CPUs that support this call: */
        unsigned int            x86_cache_size;
        int                     x86_cache_alignment;    /* In bytes */
        /* Cache QoS architectural values, valid only on the BSP: */
        int                     x86_cache_max_rmid;     /* max index */
        int                     x86_cache_occ_scale;    /* scale to bytes */
        int                     x86_cache_mbm_width_offset;
        int                     x86_power;
        unsigned long           loops_per_jiffy;
        /* cpuid returned max cores value: */
        u16                     x86_max_cores;
        u16                     apicid;
        u16                     initial_apicid;
        u16                     x86_clflush_size;
        /* number of cores as seen by the OS: */
        u16                     booted_cores;
        /* Physical processor id: */
        u16                     phys_proc_id;
        /* Logical processor id: */
        u16                     logical_proc_id;
        /* Core id: */
        u16                     cpu_core_id;
        u16                     cpu_die_id;
        u16                     logical_die_id;
        /* Index into per_cpu list: */
        u16                     cpu_index;
        u32                     microcode;
        /* Address space bits used by the cache internally */
        u8                      x86_cache_bits;
        unsigned                initialized : 1;
} __randomize_layout;

struct cpuid_regs {
        u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
        CPUID_EAX = 0,
        CPUID_EBX,
        CPUID_ECX,
        CPUID_EDX,
};

#define X86_VENDOR_INTEL        0
#define X86_VENDOR_CYRIX        1
#define X86_VENDOR_AMD          2
#define X86_VENDOR_UMC          3
#define X86_VENDOR_CENTAUR      5
#define X86_VENDOR_TRANSMETA    7
#define X86_VENDOR_NSC          8
#define X86_VENDOR_HYGON        9
#define X86_VENDOR_ZHAOXIN      10
#define X86_VENDOR_NUM          11

#define X86_VENDOR_UNKNOWN      0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86       boot_cpu_data;
extern struct cpuinfo_x86       new_cpu_data;

extern __u32                    cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32                    cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)           per_cpu(cpu_info, cpu)
#else
#define cpu_info                boot_cpu_data
#define cpu_data(cpu)           boot_cpu_data
#endif
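
/*
 * A minimal usage sketch (illustrative, not compiled): cpu_data() is
 * how per-CPU identification is normally consumed, for example:
 */
#if 0
static void example_dump_models(void)
{
        unsigned int cpu;

        for_each_online_cpu(cpu)
                pr_info("CPU%u: family 0x%x, model 0x%x\n", cpu,
                        cpu_data(cpu).x86, cpu_data(cpu).x86_model);
}
#endif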

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()       (boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
        return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
        return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
                                unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm volatile("cpuid"
            : "=a" (*eax),
              "=b" (*ebx),
              "=c" (*ecx),
              "=d" (*edx)
            : "0" (*eax), "2" (*ecx)
            : "memory");
}

#define native_cpuid_reg(reg)                                   \
static inline unsigned int native_cpuid_##reg(unsigned int op)  \
{                                                               \
        unsigned int eax = op, ebx, ecx = 0, edx;               \
                                                                \
        native_cpuid(&eax, &ebx, &ecx, &edx);                   \
                                                                \
        return reg;                                             \
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)
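
/*
 * A short sketch of the generated helpers (illustrative, not compiled):
 * each expands to a function that runs CPUID for the given leaf and
 * returns a single register, e.g.:
 */
#if 0
static void example_max_leaves(void)
{
        pr_info("max basic leaf:    0x%x\n", native_cpuid_eax(0));
        pr_info("max extended leaf: 0x%x\n", native_cpuid_eax(0x80000000));
}
#endif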

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
        return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
        return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
        write_cr3(__sme_pa(pgdir));
}
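
/*
 * A minimal sketch (illustrative, not compiled): read_cr3_pa() masks
 * off the PCID and flag bits, so the result is a plain physical address
 * that can be mapped back to the kernel virtual address of the current
 * page tables:
 */
#if 0
static pgd_t *example_current_pgd(void)
{
        return (pgd_t *)__va(read_cr3_pa());
}
#endif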

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
        unsigned short          back_link, __blh;
        unsigned long           sp0;
        unsigned short          ss0, __ss0h;
        unsigned long           sp1;

        /*
         * We don't use ring 1, so ss1 is a convenient scratch space in
         * the same cacheline as sp0.  We use ss1 to cache the value in
         * MSR_IA32_SYSENTER_CS.  When we context switch
         * MSR_IA32_SYSENTER_CS, we first check if the new value being
         * written matches ss1, and, if it's not, then we wrmsr the new
         * value and update ss1.
         *
         * The only reason we context switch MSR_IA32_SYSENTER_CS is
         * that we set it to zero in vm86 tasks to avoid corrupting the
         * stack if we were to go through the sysenter path from vm86
         * mode.
         */
        unsigned short          ss1;    /* MSR_IA32_SYSENTER_CS */

        unsigned short          __ss1h;
        unsigned long           sp2;
        unsigned short          ss2, __ss2h;
        unsigned long           __cr3;
        unsigned long           ip;
        unsigned long           flags;
        unsigned long           ax;
        unsigned long           cx;
        unsigned long           dx;
        unsigned long           bx;
        unsigned long           sp;
        unsigned long           bp;
        unsigned long           si;
        unsigned long           di;
        unsigned short          es, __esh;
        unsigned short          cs, __csh;
        unsigned short          ss, __ssh;
        unsigned short          ds, __dsh;
        unsigned short          fs, __fsh;
        unsigned short          gs, __gsh;
        unsigned short          ldt, __ldth;
        unsigned short          trace;
        unsigned short          io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
        u32                     reserved1;
        u64                     sp0;

        /*
         * We store cpu_current_top_of_stack in sp1 so it's always accessible.
         * Linux does not use ring 1, so sp1 is not otherwise needed.
         */
        u64                     sp1;

        /*
         * Since Linux does not use ring 2, the 'sp2' slot is unused by
         * hardware.  entry_SYSCALL_64 uses it as scratch space to stash
         * the user RSP value.
         */
        u64                     sp2;

        u64                     reserved2;
        u64                     ist[7];
        u32                     reserved3;
        u32                     reserved4;
        u16                     reserved5;
        u16                     io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS                  65536
#define IO_BITMAP_BYTES                 (IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES / sizeof(long))
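
/*
 * Worked sizes on 64-bit (BITS_PER_BYTE == 8, sizeof(long) == 8):
 * IO_BITMAP_BYTES = 65536 / 8 = 8192 and IO_BITMAP_LONGS = 8192 / 8 =
 * 1024, i.e. one permission bit per I/O port.
 */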

#define IO_BITMAP_OFFSET_VALID_MAP                              \
        (offsetof(struct tss_struct, io_bitmap.bitmap) -        \
         offsetof(struct tss_struct, x86_tss))

#define IO_BITMAP_OFFSET_VALID_ALL                              \
        (offsetof(struct tss_struct, io_bitmap.mapall) -        \
         offsetof(struct tss_struct, x86_tss))

#ifdef CONFIG_X86_IOPL_IOPERM
/*
 * The extra sizeof(unsigned long) comes from the additional "long" at
 * the end of the I/O bitmap. The limit is inclusive, i.e. it points at
 * the last valid byte.
 */
# define __KERNEL_TSS_LIMIT     \
        (IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
         sizeof(unsigned long) - 1)
#else
# define __KERNEL_TSS_LIMIT     \
        (offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
#endif

/* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
#define IO_BITMAP_OFFSET_INVALID        (__KERNEL_TSS_LIMIT + 1)

struct entry_stack {
        char    stack[PAGE_SIZE];
};

struct entry_stack_page {
        struct entry_stack stack;
} __aligned(PAGE_SIZE);

/*
 * All IO bitmap related data stored in the TSS:
 */
struct x86_io_bitmap {
        /* The sequence number of the last active bitmap. */
        u64                     prev_sequence;

        /*
         * Store the dirty size of the last io bitmap offender. The next
         * one will have to do the cleanup, because a switch out to a task
         * that does not use an io bitmap just sets x86_tss.io_bitmap_base
         * to a value outside of the TSS limit. So for sane tasks there is
         * no need to actually touch the io_bitmap at all.
         */
        unsigned int            prev_max;

        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long           bitmap[IO_BITMAP_LONGS + 1];

        /*
         * Special I/O bitmap to emulate IOPL(3). All bytes zero,
         * except the additional byte at the end.
         */
        unsigned long           mapall[IO_BITMAP_LONGS + 1];
};

struct tss_struct {
        /*
         * The fixed hardware portion.  This must not cross a page boundary
         * at risk of violating the SDM's advice and potentially triggering
         * errata.
         */
        struct x86_hw_tss       x86_tss;

        struct x86_io_bitmap    io_bitmap;
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);

/* Per CPU interrupt stacks */
struct irq_stack {
        char            stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#else
/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
#endif

#ifdef CONFIG_X86_64
struct fixed_percpu_data {
        /*
         * GCC hardcodes the stack canary as %gs:40.  Since the
         * irq_stack is the object at %gs:0, we reserve the bottom
         * 48 bytes of the irq stack for the canary.
         */
        char            gs_base[40];
        unsigned long   stack_canary;
};

DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
        return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}

DECLARE_PER_CPU(void *, hardirq_stack_ptr);
DECLARE_PER_CPU(bool, hardirq_stack_inuse);
extern asmlinkage void ignore_sysret(void);

/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
#else   /* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
        char __pad[20];         /* canary at %gs:20 */
        unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
#endif  /* !X86_64 */

extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;

struct perf_event;

struct thread_struct {
        /* Cached TLS descriptors: */
        struct desc_struct      tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
        unsigned long           sp0;
#endif
        unsigned long           sp;
#ifdef CONFIG_X86_32
        unsigned long           sysenter_cs;
#else
        unsigned short          es;
        unsigned short          ds;
        unsigned short          fsindex;
        unsigned short          gsindex;
#endif

#ifdef CONFIG_X86_64
        unsigned long           fsbase;
        unsigned long           gsbase;
#else
        /*
         * XXX: this could presumably be unsigned short.  Alternatively,
         * 32-bit kernels could be taught to use fsindex instead.
         */
        unsigned long fs;
        unsigned long gs;
#endif

        /* Save middle states of ptrace breakpoints */
        struct perf_event       *ptrace_bps[HBP_NUM];
        /* Debug status used for traps, single steps, etc... */
        unsigned long           virtual_dr6;
        /* Keep track of the exact dr7 value set by the user */
        unsigned long           ptrace_dr7;
        /* Fault info: */
        unsigned long           cr2;
        unsigned long           trap_nr;
        unsigned long           error_code;
#ifdef CONFIG_VM86
        /* Virtual 86 mode info */
        struct vm86             *vm86;
#endif
        /* IO permissions: */
        struct io_bitmap        *io_bitmap;

        /*
         * IOPL. Privilege-level-dependent I/O permission which is
         * emulated via the I/O bitmap to prevent user space from
         * disabling interrupts.
         */
        unsigned long           iopl_emul;

        unsigned int            sig_on_uaccess_err:1;

        /* Floating point and extended processor state */
        struct fpu              fpu;
        /*
         * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
         * the end.
         */
};

/* Whitelist the FPU state from the task_struct for hardened usercopy. */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        *offset = offsetof(struct thread_struct, fpu.state);
        *size = fpu_kernel_xstate_size;
}

static inline void
native_load_sp0(unsigned long sp0)
{
        this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static __always_inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
        asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
        /*
         *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
         *  and around vm86 mode and sp0 on x86_64 is special because of the
         *  entry trampoline.
         */
        return this_cpu_read_stable(cpu_current_top_of_stack);
}

static inline bool on_thread_stack(void)
{
        return (unsigned long)(current_top_of_stack() -
                               current_stack_pointer) < THREAD_SIZE;
}
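
/*
 * A worked note on the unsigned comparison above (illustrative): if the
 * stack pointer lies within THREAD_SIZE below the stack top, the
 * subtraction yields a small value and the compare succeeds; if it lies
 * above the top, the subtraction wraps around to a huge unsigned value
 * and the compare fails, so a single comparison checks both bounds.
 */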

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define __cpuid                 native_cpuid

static inline void load_sp0(unsigned long sp0)
{
        native_load_sp0(sp0);
}

#endif /* CONFIG_PARAVIRT_XXL */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx, since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
                               unsigned int *eax, unsigned int *ebx,
                               unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = count;
        __cpuid(eax, ebx, ecx, edx);
}
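
/*
 * A minimal usage sketch (illustrative, not compiled): leaf 0 returns
 * the vendor string in EBX, EDX, ECX order:
 */
#if 0
static void example_vendor(void)
{
        unsigned int eax, vendor[4] = { 0 };

        cpuid(0, &eax, &vendor[0], &vendor[2], &vendor[1]);
        pr_info("vendor: %.12s\n", (char *)vendor); /* e.g. "GenuineIntel" */
}
#endif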

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return edx;
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long            boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
                         IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);


/* Defined in head.S */
extern struct desc_ptr          early_gdt_descr;

extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);

static inline unsigned long get_debugctlmsr(void)
{
        unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return 0;
#endif
        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

        return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return;
#endif
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
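
/*
 * A minimal sketch (illustrative, not compiled; DEBUGCTLMSR_BTF comes
 * from <asm/msr-index.h>): branch single-stepping is toggled by
 * flipping the BTF bit through this pair of helpers:
 */
#if 0
static void example_set_branch_step(bool on)
{
        unsigned long debugctl = get_debugctlmsr();

        if (on)
                debugctl |= DEBUGCTLMSR_BTF;
        else
                debugctl &= ~DEBUGCTLMSR_BTF;
        update_debugctlmsr(debugctl);
}
#endif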

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int                      bootloader_type;
extern int                      bootloader_version;

extern char                     ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH          ""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH          "prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3DNow! prefetches for the K6,
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
        alternative_input(BASE_PREFETCH, "prefetchnta %P1",
                          X86_FEATURE_XMM,
                          "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static __always_inline void prefetchw(const void *x)
{
        alternative_input(BASE_PREFETCH, "prefetchw %P1",
                          X86_FEATURE_3DNOWPREFETCH,
                          "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
        prefetchw(x);
}
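
/*
 * A minimal sketch (illustrative, not compiled; assumes
 * <linux/spinlock.h>): warm the cache line of a lock that is about to
 * be written, then take it:
 */
#if 0
static void example_locked_increment(spinlock_t *lock, int *counter)
{
        spin_lock_prefetch(lock);       /* request the line in exclusive state */
        spin_lock(lock);
        (*counter)++;
        spin_unlock(lock);
}
#endif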

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
                           TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({                                                                      \
        unsigned long __ptr = (unsigned long)task_stack_page(task);     \
        __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;             \
        ((struct pt_regs *)__ptr) - 1;                                  \
})
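
/*
 * Worked layout (illustrative): the user-mode pt_regs sit at the very
 * top of the kernel stack, just below the padding:
 *
 *   task_stack_page(task) + THREAD_SIZE
 *       [TOP_OF_KERNEL_STACK_PADDING bytes]
 *       <-- task_top_of_stack(task)
 *       struct pt_regs          <-- task_pt_regs(task)
 *       ...
 *   task_stack_page(task)
 */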

#ifdef CONFIG_X86_32
#define INIT_THREAD  {                                                    \
        .sp0                    = TOP_OF_INIT_STACK,                      \
        .sysenter_cs            = __KERNEL_CS,                            \
}

#define KSTK_ESP(task)          (task_pt_regs(task)->sp)

#else
#define INIT_THREAD { }

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_32 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
                                               unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap().
 */
#define __TASK_UNMAPPED_BASE(task_size) (PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE              __TASK_UNMAPPED_BASE(TASK_SIZE_LOW)

#define KSTK_EIP(task)          (task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)        get_tsc_mode((adr))
#define SET_TSC_CTL(val)        set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
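
/*
 * A minimal userspace sketch (illustrative, not compiled): these back
 * the PR_GET_TSC and PR_SET_TSC prctl() operations from <linux/prctl.h>:
 */
#if 0
#include <sys/prctl.h>

static void example_forbid_rdtsc(void)
{
        int state;

        prctl(PR_GET_TSC, &state);              /* PR_TSC_ENABLE or PR_TSC_SIGSEGV */
        prctl(PR_SET_TSC, PR_TSC_SIGSEGV);      /* fault on rdtsc from now on */
}
#endif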

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
#else
static inline u32 amd_get_nodes_per_socket(void)        { return 0; }
static inline u32 amd_get_highest_perf(void)            { return 0; }
#endif

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
        uint32_t base, eax, signature[3];

        for (base = 0x40000000; base < 0x40010000; base += 0x100) {
                cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

                if (!memcmp(sig, signature, 12) &&
                    (leaves == 0 || ((eax - base) >= leaves)))
                        return base;
        }

        return 0;
}
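
/*
 * A minimal usage sketch (illustrative, not compiled): guests probe for
 * their hypervisor by signature; e.g. KVM advertises "KVMKVMKVM\0\0\0"
 * in the 0x40000000 leaf range:
 */
#if 0
static bool example_running_on_kvm(void)
{
        return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0) != 0;
}
#endif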

extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef  CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void microcode_check(void);

enum l1tf_mitigations {
        L1TF_MITIGATION_OFF,
        L1TF_MITIGATION_FLUSH_NOWARN,
        L1TF_MITIGATION_FLUSH,
        L1TF_MITIGATION_FLUSH_NOSMT,
        L1TF_MITIGATION_FULL,
        L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
        MDS_MITIGATION_OFF,
        MDS_MITIGATION_FULL,
        MDS_MITIGATION_VMWERV,
};

#endif /* _ASM_X86_PROCESSOR_H */

