Linux/arch/x86/include/asm/paravirt_types.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all registers the platform has. For i386, that's just the four above. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS   (CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG    (CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH    (0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS   (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
                         CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG    (CLBR_RAX)
#define CLBR_SCRATCH    (CLBR_R10 | CLBR_R11)

#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
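/*
 * Worked example (64-bit): CLBR_ARG_REGS | CLBR_SCRATCH sets the bits
 * for rdi,rsi,rdx,rcx,r8,r9 plus r10,r11, i.e. 0x1fe.  CLBR_RET_REG
 * (rax, bit 0) is not in that set anyway, so CLBR_CALLEE_SAVE is also
 * 0x1fe.
 */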

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK() in paravirt.h.
 */
struct paravirt_callee_save {
        void *func;
};
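/*
 * Illustrative note: a backend typically generates the non-standard
 * entry point with PV_CALLEE_SAVE_REGS_THUNK() and stores it here via
 * the PV_CALLEE_SAVE() wrapper from paravirt.h, e.g. (sketch):
 *
 *   .pte_val = PV_CALLEE_SAVE(xen_pte_val),
 */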

/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
        unsigned int kernel_rpl;
        int shared_kernel_pmd;

#ifdef CONFIG_X86_64
        u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif
#endif

        const char *name;
};

struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX.  The patch function should return
         * the number of bytes of code generated, as we nop pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, void *insn_buff,
                          unsigned long addr, unsigned len);
} __no_randomize_layout;

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
        void (*flush)(void);
} __no_randomize_layout;
#endif

struct pv_time_ops {
        unsigned long long (*sched_clock)(void);
        unsigned long long (*steal_clock)(int cpu);
} __no_randomize_layout;

struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        void (*write_cr4)(unsigned long);

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
        void (*load_gs_index)(unsigned int idx);
#endif
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
        void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

        void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
        void (*update_io_bitmap)(void);
#endif

        void (*wbinvd)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* Unsafe MSR operations.  These will warn or panic on failure. */
        u64 (*read_msr)(unsigned int msr);
        void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        /*
         * Safe MSR operations.
         * read sets err to 0 or -EIO.  write returns 0 or -EIO.
         */
        u64 (*read_msr_safe)(unsigned int msr, int *err);
        int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_pmc)(int counter);

        /*
         * Switch to usermode gs and return to 64-bit usermode using
         * sysret.  Only used in 64-bit kernels to return to 64-bit
         * processes.  Usermode register state, including %rsp, must
         * already be restored.
         */
        void (*usergs_sysret64)(void);

        /* Normal iret.  Jump to this with the standard iret stack
           frame set up. */
        void (*iret)(void);

        void (*swapgs)(void);

        void (*start_context_switch)(struct task_struct *prev);
        void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;
struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         *
         * NOTE: Callers of these functions expect the callee to
         * preserve more registers than the standard C calling
         * convention.
         */
        struct paravirt_callee_save save_fl;
        struct paravirt_callee_save restore_fl;
        struct paravirt_callee_save irq_disable;
        struct paravirt_callee_save irq_enable;

        void (*safe_halt)(void);
        void (*halt)(void);
#endif
} __no_randomize_layout;

struct pv_mmu_ops {
        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_one_user)(unsigned long addr);
        void (*flush_tlb_others)(const struct cpumask *cpus,
                                 const struct flush_tlb_info *info);

        void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);

        /* Hook for intercepting the destruction of an mm_struct. */
        void (*exit_mmap)(struct mm_struct *mm);

#ifdef CONFIG_PARAVIRT_XXL
        struct paravirt_callee_save read_cr2;
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /* Hooks for intercepting the creation/use of an mm_struct. */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);

        /* Hooks for allocating and freeing a pagetable top-level */
        int  (*pgd_alloc)(struct mm_struct *mm);
        void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

        /*
         * Hooks for allocating/releasing pagetable pages when they're
         * attached to a pagetable
         */
        void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
        void (*release_pte)(unsigned long pfn);
        void (*release_pmd)(unsigned long pfn);
        void (*release_pud)(unsigned long pfn);
        void (*release_p4d)(unsigned long pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

        pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
                                        pte_t *ptep);
        void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
                                        pte_t *ptep, pte_t pte);

        struct paravirt_callee_save pte_val;
        struct paravirt_callee_save make_pte;

        struct paravirt_callee_save pgd_val;
        struct paravirt_callee_save make_pgd;

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

#endif  /* CONFIG_X86_PAE */

        void (*set_pud)(pud_t *pudp, pud_t pudval);

        struct paravirt_callee_save pmd_val;
        struct paravirt_callee_save make_pmd;

#if CONFIG_PGTABLE_LEVELS >= 4
        struct paravirt_callee_save pud_val;
        struct paravirt_callee_save make_pud;

        void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
        struct paravirt_callee_save p4d_val;
        struct paravirt_callee_save make_p4d;

        void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

#endif  /* CONFIG_PGTABLE_LEVELS >= 4 */

#endif  /* CONFIG_PGTABLE_LEVELS >= 3 */

        struct pv_lazy_ops lazy_mode;

        /* dom0 ops */

        /* Sometimes the physical address is a pfn, and sometimes it's
           an mfn.  We can tell which is which from the index. */
        void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
                           phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
        void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
        struct paravirt_callee_save queued_spin_unlock;

        void (*wait)(u8 *ptr, u8 val);
        void (*kick)(int cpu);

        struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;

/*
 * This contains all the paravirt structures: we get a convenient
 * number for each function from its pointer offset within the
 * template, and use that number to indicate what to patch.
 */
struct paravirt_patch_template {
        struct pv_init_ops      init;
        struct pv_time_ops      time;
        struct pv_cpu_ops       cpu;
        struct pv_irq_ops       irq;
        struct pv_mmu_ops       mmu;
        struct pv_lock_ops      lock;
} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;

#define PARAVIRT_PATCH(x)                                       \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)                               \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
        [paravirt_opptr] "i" (&(pv_ops.op))
#define paravirt_clobber(clobber)               \
        [paravirt_clobber] "i" (clobber)

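/*
 * Worked example (illustrative): with the template above,
 * PARAVIRT_PATCH(time.sched_clock) evaluates to
 * offsetof(struct paravirt_patch_template, time.sched_clock) /
 * sizeof(void *), i.e. the pointer-sized index of that slot, and
 * paravirt_type(time.sched_clock) also binds &pv_ops.time.sched_clock
 * as the operand that PARAVIRT_CALL below dereferences.
 */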
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)       \
        "771:\n\t" insn_string "\n" "772:\n"            \
        ".pushsection .parainstructions,\"a\"\n"        \
        _ASM_ALIGN "\n"                                 \
        _ASM_PTR " 771b\n"                              \
        "  .byte " type "\n"                            \
        "  .byte 772b-771b\n"                           \
        "  .short " clobber "\n"                        \
        ".popsection\n"
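/*
 * Illustrative expansion: besides the instruction(s) emitted between
 * the local labels 771 and 772, the macro above records one entry in
 * .parainstructions:
 *
 *   .long/.quad 771b       # address of the patch site
 *   .byte <type>           # PARAVIRT_PATCH index
 *   .byte 772b-771b        # length of the site in bytes
 *   .short <clobber>       # CLBR_* mask the site may clobber
 *
 * struct paravirt_patch_site at the end of this header is the C-side
 * view of these records.
 */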

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)                                       \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

unsigned paravirt_patch_ident_64(void *insn_buff, unsigned len);
unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end);

unsigned native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned len);

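/*
 * Minimal sketch (illustrative, not part of this header) of a
 * pv_init_ops.patch hook: a backend handles whatever sequences it
 * wants to inline natively and defers the rest to
 * paravirt_patch_default(), which roughly turns the site into a
 * direct call to the current pv_ops target (or into NOPs/identity
 * code for the special nop and ident ops).
 */
static inline unsigned example_backend_patch(u8 type, void *insn_buff,
                                             unsigned long addr, unsigned len)
{
        /* No backend-specific sequences in this sketch. */
        return paravirt_patch_default(type, insn_buff, addr, len);
}
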
int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL                                   \
        ANNOTATE_RETPOLINE_SAFE                         \
        "call *%c[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because the CPU cannot necessarily determine the destination
 * address in advance.  In this case, the address is a runtime
 * constant, so at the very least we can patch the call to be a simple
 * direct call, or, ideally, patch an inline implementation into the
 * callsite.  (Direct calls are essentially free, because the call and
 * return addresses are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based
 * calling convention, returning in %rax, with parameters going in
 * %rdi, %rsi, %rdx, and %rcx.  Note that for this reason, x86_64 does
 * not need any special handling for dealing with 4 arguments, unlike
 * i386.  However, x86_64 also has to clobber all caller-saved
 * registers, which, unfortunately, are quite a few (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * They could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * two variants: VCALL for void functions and CALL for non-void ones.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and picks the return
 * up from the right register(s) (just %eax for 32-bit, %edx:%eax for
 * 64-bit).  On x86_64, the return is simply in %rax regardless of the
 * return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  (For an
 * illustrative wrapper, see the sketch after the PVOP_CALL4
 * definitions below.)  This also makes sure the incoming and outgoing
 * types are always correct.
 */
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS                                                 \
        unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)               "a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)               "d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)               "c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
#define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS           "=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS            PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS                                         \
        unsigned long __edi = __edi, __esi = __esi,             \
                __edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)               "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)               "S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)               "d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)               "c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS     "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)
#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS   "=a" (__eax)
#define PVOP_CALLEE_CLOBBERS    PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
#endif  /* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)      BUG_ON(pv_ops.op == NULL)
#else
#define PVOP_TEST_NULL(op)      ((void)pv_ops.op)
#endif

#define PVOP_RETMASK(rettype)                                           \
        ({      unsigned long __mask = ~0UL;                            \
                switch (sizeof(rettype)) {                              \
                case 1: __mask =       0xffUL; break;                   \
                case 2: __mask =     0xffffUL; break;                   \
                case 4: __mask = 0xffffffffUL; break;                   \
                default: break;                                         \
                }                                                       \
                __mask;                                                 \
        })
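/*
 * E.g.: for a pv_op declared to return u8, only %al is meaningful, so
 * PVOP_RETMASK yields 0xff and ____PVOP_CALL below truncates the raw
 * register value before casting it to the declared return type.
 */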


#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,         \
                      pre, post, ...)                                   \
        ({                                                              \
                rettype __ret;                                          \
                PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                if (sizeof(rettype) > sizeof(unsigned long)) {          \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : call_clbr, ASM_CALL_CONSTRAINT   \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : call_clbr, ASM_CALL_CONSTRAINT   \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)(__eax & PVOP_RETMASK(rettype));       \
                }                                                       \
                __ret;                                                  \
        })

#define __PVOP_CALL(rettype, op, pre, post, ...)                        \
        ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,        \
                      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)                  \
        ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
                      PVOP_CALLEE_CLOBBERS, ,                           \
                      pre, post, ##__VA_ARGS__)


#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
                             : call_clbr, ASM_CALL_CONSTRAINT           \
                             : paravirt_type(op),                       \
                               paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \
                             : "memory", "cc" extra_clbr);              \
        })

#define __PVOP_VCALL(op, pre, post, ...)                                \
        ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,               \
                       VEXTRA_CLOBBERS,                                 \
                       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)                          \
        ____PVOP_VCALL(op.func, CLBR_RET_REG,                           \
                      PVOP_VCALLEE_CLOBBERS, ,                          \
                      pre, post, ##__VA_ARGS__)



#define PVOP_CALL0(rettype, op)                                         \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)                                                 \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)                                       \
        __PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)                                               \
        __PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)                                   \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)                                           \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)                                 \
        __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)                                         \
        __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)                             \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
                    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)                                     \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
                     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)                           \
        __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),    \
                          PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)                                   \
        __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),            \
                           PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
                    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
                     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/*
 * The 4-argument case is the only place i386 and x86_64 differ: i386
 * has to pass the fourth argument on the stack, while x86_64 still
 * has argument registers to spare.
 */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
                    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op, "", "",                                \
                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
                    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op, "", "",                                        \
                     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),        \
                     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
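
/*
 * Illustrative sketch (the real wrappers live in paravirt.h, not
 * here): each pv_op is exposed to the rest of the kernel through a
 * thin inline whose body is one of the PVOP_* macros above, e.g.:
 */
#ifdef CONFIG_PARAVIRT_XXL
static inline unsigned long example_read_cr3(void)
{
        /* Patchable call through pv_ops.mmu.read_cr3. */
        return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void example_write_cr3(unsigned long x)
{
        /* Void variant: no return value to pick up. */
        PVOP_VCALL1(mmu.write_cr3, x);
}
#endif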

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);
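/*
 * Typical batching pattern (illustrative; generic code normally
 * reaches the lazy-MMU hooks via arch_enter_lazy_mmu_mode() and
 * friends rather than calling the helpers above directly):
 *
 *   arch_enter_lazy_mmu_mode();
 *   ... a run of set_pte()/set_pmd() updates the backend may queue ...
 *   arch_leave_lazy_mmu_mode();
 */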

void _paravirt_nop(void);
u64 _paravirt_ident_64(u64);

#define paravirt_nop    ((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 type;                /* type of this instruction */
        u8 len;                 /* length of original instruction */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];

#endif  /* __ASSEMBLY__ */

#endif  /* _ASM_X86_PARAVIRT_TYPES_H */
