/* arch/x86/include/asm/paravirt_types.h */

#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all the registers the platform has.  For
 * i386, that's exactly the four defined above. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS   (CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG    (CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH    (0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS   (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
                         CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG    (CLBR_RAX)
#define CLBR_SCRATCH    (CLBR_R10 | CLBR_R11)

#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
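
/*
 * Worked example (editor's illustration, derived purely from the
 * masks above): on 64-bit,
 *
 *   CLBR_ARG_REGS | CLBR_SCRATCH = rdi|rsi|rdx|rcx|r8|r9|r10|r11
 *
 * and CLBR_RET_REG (rax, bit 0) is not in that set, so
 * CLBR_CALLEE_SAVE is 0x1fe.  On i386 the same expression leaves just
 * CLBR_ECX (0x2).  Either way, it names the registers a callee-save
 * function is expected to preserve rather than clobber.
 */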

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK() in
 * <asm/paravirt.h>.
 */
struct paravirt_callee_save {
        void *func;
};
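
/*
 * Illustrative sketch (editor's addition, not part of this header):
 * <asm/paravirt.h> builds these wrappers roughly like
 *
 *   #define PV_CALLEE_SAVE(func)                                     \
 *           ((struct paravirt_callee_save) { __raw_callee_save_##func })
 *
 * where __raw_callee_save_##func is asm glue emitted by
 * PV_CALLEE_SAVE_REGS_THUNK(func): it pushes the caller-save
 * registers, calls the plain C function, and pops them again, so the
 * call site only needs the lighter CLBR_RET_REG clobber set.  Exact
 * details vary by kernel version.
 */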

/* general info */
struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;

#ifdef CONFIG_X86_64
        u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

        int paravirt_enabled;
        const char *name;
};

struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX.  The patch function should return
         * the number of bytes of code generated, as we nop-pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);
};


struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
        void (*flush)(void);
};

struct pv_time_ops {
        unsigned long long (*sched_clock)(void);
        unsigned long long (*steal_clock)(int cpu);
        unsigned long (*get_tsc_khz)(void);
};

struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
        unsigned long (*read_cr8)(void);
        void (*write_cr8)(unsigned long);
#endif

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        /* store_gdt has been removed. */
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
        void (*load_gs_index)(unsigned int idx);
#endif
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
        void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* MSR, PMC and TSC operations.
           read_msr sets *err to 0 or -EFAULT; write_msr returns
           0 or -EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);

        /*
         * Atomically enable interrupts and return to userspace.  This
         * is only ever used to return to 32-bit processes; in a
         * 64-bit kernel, it's used for 32-on-64 compat processes, but
         * never native 64-bit processes.  (Jump, not call.)
         */
        void (*irq_enable_sysexit)(void);

        /*
         * Switch to usermode gs and return to 64-bit usermode using
         * sysret.  Only used in 64-bit kernels to return to 64-bit
         * processes.  Usermode register state, including %rsp, must
         * already be restored.
         */
        void (*usergs_sysret64)(void);

        /*
         * Switch to usermode gs and return to 32-bit usermode using
         * sysret.  Used to return to 32-on-64 compat processes.
         * Other usermode register state, including %esp, must already
         * be restored.
         */
        void (*usergs_sysret32)(void);

        /* Normal iret.  Jump to this with the standard iret stack
           frame set up. */
        void (*iret)(void);

        void (*swapgs)(void);

        void (*start_context_switch)(struct task_struct *prev);
        void (*end_context_switch)(struct task_struct *next);
};

struct pv_irq_ops {
        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         *
         * NOTE: Callers of these functions expect the callee to
         * preserve more registers than the standard C calling
         * convention requires.
         */
        struct paravirt_callee_save save_fl;
        struct paravirt_callee_save restore_fl;
        struct paravirt_callee_save irq_disable;
        struct paravirt_callee_save irq_enable;

        void (*safe_halt)(void);
        void (*halt)(void);

#ifdef CONFIG_X86_64
        void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);


        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const struct cpumask *cpus,
                                 struct mm_struct *mm,
                                 unsigned long start,
                                 unsigned long end);

        /* Hooks for allocating and freeing a pagetable top-level */
        int  (*pgd_alloc)(struct mm_struct *mm);
        void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

        /*
         * Hooks for allocating/releasing pagetable pages when they're
         * attached to a pagetable
         */
        void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
        void (*release_pte)(unsigned long pfn);
        void (*release_pmd)(unsigned long pfn);
        void (*release_pud)(unsigned long pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
                           pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);
        void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
                           pmd_t *pmdp);
        void (*pmd_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pmd_t *pmdp);

        pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep);
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);

        struct paravirt_callee_save pte_val;
        struct paravirt_callee_save make_pte;

        struct paravirt_callee_save pgd_val;
        struct paravirt_callee_save make_pgd;

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

#endif  /* CONFIG_X86_PAE */

        void (*set_pud)(pud_t *pudp, pud_t pudval);

        struct paravirt_callee_save pmd_val;
        struct paravirt_callee_save make_pmd;

#if PAGETABLE_LEVELS == 4
        struct paravirt_callee_save pud_val;
        struct paravirt_callee_save make_pud;

        void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif  /* PAGETABLE_LEVELS == 4 */
#endif  /* PAGETABLE_LEVELS >= 3 */

        struct pv_lazy_ops lazy_mode;

        /* dom0 ops */

        /* Sometimes the physical address is a pfn, and sometimes it's
           an mfn.  We can tell which is which from the index. */
        void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
                           phys_addr_t phys, pgprot_t flags);
};

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#else
typedef u16 __ticket_t;
#endif

struct pv_lock_ops {
        struct paravirt_callee_save lock_spinning;
        void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function by using its offset into the template,
 * which we use to indicate what to patch. */
struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
        struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)                                       \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
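
/*
 * Worked example (editor's illustration; the offset is hypothetical):
 * every pointer-sized slot in paravirt_patch_template gets a small
 * integer.  On a 64-bit kernel, if pv_irq_ops.irq_disable lived at
 * byte offset 200, then
 *
 *   PARAVIRT_PATCH(pv_irq_ops.irq_disable) == 200 / 8 == 25
 *
 * and 25 * sizeof(void *) recovers the structure offset, which is how
 * the patcher finds the matching entry again.
 */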

#define paravirt_type(op)                               \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
        [paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)               \
        [paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)       \
        "771:\n\t" insn_string "\n" "772:\n"            \
        ".pushsection .parainstructions,\"a\"\n"        \
        _ASM_ALIGN "\n"                                 \
        _ASM_PTR " 771b\n"                              \
        "  .byte " type "\n"                            \
        "  .byte 772b-771b\n"                           \
        "  .short " clobber "\n"                        \
        ".popsection\n"
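
/*
 * Editor's note on the record layout: the four directives emitted
 * into .parainstructions line up field-for-field with struct
 * paravirt_patch_site at the bottom of this header:
 *
 *   _ASM_PTR 771b        ->  u8 *instr;     (start of the insn)
 *   .byte    type        ->  u8 instrtype;  (PARAVIRT_PATCH number)
 *   .byte    772b-771b   ->  u8 len;        (size of the site)
 *   .short   clobber     ->  u16 clobbers;  (CLBR_* mask)
 *
 * so the patcher can walk the section as an array of those structs.
 */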

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)                                       \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)                                     \
        extern const char start_##ops##_##name[] __visible,             \
                          end_##ops##_##name[] __visible;               \
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
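
/*
 * Typical use (a sketch after the pattern in the
 * arch/x86/kernel/paravirt_patch_*.c files; exact sites vary by
 * version):
 *
 *   DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *   DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *
 * Each definition captures the native instruction sequence between
 * start_pv_irq_ops_irq_disable[] and end_pv_irq_ops_irq_disable[],
 * which paravirt_patch_insns() can then copy over a call site.
 */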

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL   "call *%c[paravirt_opptr];"
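
/*
 * Editor's illustration (hypothetical offset): once the "i"
 * constraints from paravirt_type() are substituted, the string above
 * assembles to something like
 *
 *   call *pv_cpu_ops+40
 *
 * an indirect call through the live slot in the ops structure, which
 * apply_paravirt() can later rewrite into a direct call or inline
 * native code.
 */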

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because the CPU cannot necessarily predict the destination
 * address.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based
 * calling convention, returning in %rax, with parameters passed in
 * %rdi, %rsi, %rdx, and %rcx.  Note that for this reason, x86_64 does
 * not need any special handling for dealing with 4 arguments, unlike
 * i386.  However, x86_64 must also clobber all caller-saved
 * registers, which unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  On x86_64, the value is simply returned in %rax regardless
 * of its size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
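
/*
 * Minimal usage sketch (editor's addition, modeled on the wrappers in
 * <asm/paravirt.h>; the real wrappers may differ in detail):
 *
 *   static inline unsigned long read_cr2(void)
 *   {
 *           return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *   }
 *
 *   static inline void write_cr2(unsigned long x)
 *   {
 *           PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
 *   }
 *
 * The inline wrapper supplies the C types; the macro emits the
 * patchable indirect call.
 */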
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS                         \
        unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)               "a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)               "d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)               "c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
#define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS           "=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS            PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS                                 \
        unsigned long __edi = __edi, __esi = __esi,     \
                __edx = __edx, __ecx = __ecx, __eax = __eax
#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)               "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)               "S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)               "d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)               "c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS     "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)
#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS   "=a" (__eax)
#define PVOP_CALLEE_CLOBBERS    PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
#endif  /* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)      BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)      ((void)op)
#endif

#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,         \
                      pre, post, ...)                                   \
        ({                                                              \
                rettype __ret;                                          \
                PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                if (sizeof(rettype) > sizeof(unsigned long)) {          \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : call_clbr                        \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : call_clbr                        \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })
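
/*
 * Editor's illustration of what one instance boils down to (64-bit,
 * one argument; the offset is hypothetical):
 *
 *   PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, regno)
 *
 * loads regno into %rdi, emits "call *pv_cpu_ops+OFFSET" bracketed by
 * the 771:/772: labels from _paravirt_alt(), records the site in
 * .parainstructions, and yields the result from %rax, with r8-r11
 * listed as additional clobbers via EXTRA_CLOBBERS.
 */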

#define __PVOP_CALL(rettype, op, pre, post, ...)                        \
        ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,        \
                      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)                  \
        ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
                      PVOP_CALLEE_CLOBBERS, ,                           \
                      pre, post, ##__VA_ARGS__)


#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
                             : call_clbr                                \
                             : paravirt_type(op),                       \
                               paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \
                             : "memory", "cc" extra_clbr);              \
        })

#define __PVOP_VCALL(op, pre, post, ...)                                \
        ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,               \
                       VEXTRA_CLOBBERS,                                 \
                       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)                          \
        ____PVOP_VCALL(op.func, CLBR_RET_REG,                           \
                      PVOP_VCALLEE_CLOBBERS, ,                          \
                      pre, post, ##__VA_ARGS__)



#define PVOP_CALL0(rettype, op)                                         \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)                                                 \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)                                       \
        __PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)                                               \
        __PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)                                   \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)                                           \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)                                 \
        __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)                                         \
        __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)                             \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
                    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)                                     \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
                     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)                           \
        __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),    \
                          PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)                                   \
        __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),            \
                           PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
                    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
                     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* The 4-argument case is the only place i386 and x86_64 differ:
 * i386 must pass the fourth argument on the stack. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
                    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op, "", "",                                \
                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
                    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op, "", "",                                        \
                     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),        \
                     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);
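
/*
 * Sketch of the batching idiom (editor's addition; generic code
 * reaches these hooks via arch_enter_lazy_mmu_mode() and friends, and
 * details vary by version):
 *
 *   arch_enter_lazy_mmu_mode();     // pv_mmu_ops.lazy_mode.enter
 *   for (...)
 *           set_pte_at(mm, addr, ptep, pte);  // updates may be queued
 *   arch_leave_lazy_mmu_mode();     // pv_mmu_ops.lazy_mode.leave:
 *                                   // flush the whole batch at once
 *
 * A hypervisor backend can thus turn the queued updates into a single
 * hypercall instead of one per PTE.
 */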

void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop    ((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
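
/*
 * Rough sketch of how these records are consumed (editor's addition,
 * loosely following apply_paravirt() in
 * arch/x86/kernel/alternative.c; details vary by version):
 *
 *   struct paravirt_patch_site *p;
 *
 *   for (p = __parainstructions; p < __parainstructions_end; p++) {
 *           unsigned used;
 *
 *           used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *                                    p->instr, (unsigned long)p->instr,
 *                                    p->len);
 *           // nop-pad the remaining p->len - used bytes of the site
 *   }
 */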

#endif  /* __ASSEMBLY__ */

#endif  /* _ASM_X86_PARAVIRT_TYPES_H */