#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}
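/*
 * A PAE pte is 64 bits wide, but 32-bit calls pass arguments as 32-bit
 * words, so the PVOP_VCALL3 above hands the pte over as its low and high
 * halves; per the ABI the two words together form the single 64-bit pte
 * argument seen by the hook. The native backend then performs one atomic
 * 64-bit store, roughly (illustrative sketch of the native case):
 *
 *      static void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 *      {
 *              set_64bit((unsigned long long *)ptep, native_pte_val(pte));
 *      }
 */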
static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words; the PVOP_CALL macros only handle up to 4,
           so call the op directly */
        pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}
#endif  /* CONFIG_X86_PAE */
#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
                                 val, (u64)val >> 32);
        else
                ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
                                 val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
                                 pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
                                 pmd.pmd);

        return ret;
}
#endif /* PAGETABLE_LEVELS >= 3 */
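/*
 * Note: the sizeof() test above is resolved at compile time. On 32-bit
 * PAE a pmdval_t is 64 bits and must be split across two 32-bit
 * arguments, while on 64-bit it fits in a single register. For the
 * native backend, make_pmd and pmd_val are identity operations on the
 * raw value, so pmd_val(__pmd(x)) == x.
 */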
#ifdef CONFIG_X86_PAE
static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
                    pmdval.pmd, pmdval.pmd >> 32);
}

static inline void set_pud(pud_t *pudp, pud_t pudval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                    pudval.pgd.pgd, pudval.pgd.pgd >> 32);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};
enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
                arch_leave_lazy_cpu_mode();
                arch_enter_lazy_cpu_mode();
        }
}
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }
}
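/*
 * Typical use (illustrative; the loop body is the caller's code, not
 * something defined here): bracketing a run of page-table updates lets a
 * hypervisor backend queue them and flush the batch in one hypercall:
 *
 *      arch_enter_lazy_mmu_mode();
 *      for (; addr < end; addr += PAGE_SIZE, ptep++)
 *              set_pte_at(mm, addr, ptep, pte);
 *      arch_leave_lazy_mmu_mode();
 *
 * The arch_flush_* helpers above push out any pending updates without
 * ending the batch, by briefly leaving and re-entering lazy mode.
 */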
void _paravirt_nop(void);
#define paravirt_nop ((void *)_paravirt_nop)
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};
extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
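/*
 * At boot, apply_paravirt() (arch/x86/kernel/alternative.c) walks the
 * records between these two symbols and lets the backend rewrite each
 * call site in place, roughly (sketch, details elided):
 *
 *      for (p = __parainstructions; p < __parainstructions_end; p++)
 *              used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *                                       insnbuf, (unsigned long)p->instr,
 *                                       p->len);
 */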
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save only some registers; saving all of them would be too much.
 * The remaining caller-saved registers are instead listed as clobbers,
 * sparing only the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx"
#define PV_FLAGS_ARG "D"
#endif
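/*
 * PV_FLAGS_ARG is the asm constraint used to pass the flags argument to
 * raw_local_irq_restore() below: "0" reuses output operand 0 (%eax) on
 * 32-bit, while "D" passes it in %rdi on 64-bit, the register that
 * PV_SAVE_REGS preserves.
 */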
static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_VEXTRA_CLOBBERS);
        return f;
}
static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_EXTRA_CLOBBERS);
}
static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}
static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
                                  PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}
static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}
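/*
 * These pair in the usual way (illustrative; the critical section is the
 * caller's code), analogous to the native pushf/cli ... popf sequence:
 *
 *      unsigned long flags = __raw_local_irq_save();
 *      ... code that must not be interrupted ...
 *      raw_local_irq_restore(flags);
 */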
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4
#else /* __ASSEMBLY__ */
#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                                   \
        ops;                                            \
772:;                                                   \
        .pushsection .parainstructions,"a";             \
         .align algn;                                   \
         word 771b;                                     \
         .byte ptype;                                   \
         .byte 772b-771b;                               \
         .short clobbers;                               \
        .popsection
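/*
 * The 771:/772: local labels bracket the unpatched instruction sequence;
 * the record emitted into .parainstructions mirrors struct
 * paravirt_patch_site above (address, type, length, clobbers), which is
 * how the boot-time patcher locates and rewrites each site.
 */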
#ifdef CONFIG_X86_64
#define PV_SAVE_REGS pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx
#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#else
#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#endif
#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,      \
                  jmp *%cs:pv_cpu_ops+PV_CPU_iret)
#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS;                                         \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_disable;              \
                  PV_RESTORE_REGS;)
#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS;                                         \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_enable;               \
                  PV_RESTORE_REGS;)
#define ENABLE_INTERRUPTS_SYSCALL_RET                                   \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\
                  CLBR_NONE,                                            \
                  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)
#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                        \
        push %ecx; push %edx;                   \
        call *pv_cpu_ops+PV_CPU_read_cr0;       \
        pop %edx; pop %ecx
#else   /* !CONFIG_X86_32 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,    \
                  PV_SAVE_REGS;                                         \
                  call *pv_cpu_ops+PV_CPU_swapgs;                       \
                  PV_RESTORE_REGS                                       \
                 )

#define GET_CR2_INTO_RCX                        \
        call *pv_mmu_ops+PV_MMU_read_cr2;       \
        movq %rax, %rcx;                        \
        xorq %rax, %rax;

#endif  /* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */