Newer
Older
PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
Jeremy Fitzhardinge
committed
}
#else /* !CONFIG_X86_PAE */
/* Build a pte_t from a raw value via pv_mmu_ops.make_pte (non-PAE: one word). */
static inline pte_t __pte(unsigned long val)
{
	return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) };
}
/* Build a pgd_t from a raw value via pv_mmu_ops.make_pgd. */
static inline pgd_t __pgd(unsigned long val)
{
	return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) };
}
/* Extract the raw value from a pte_t (non-PAE: only pte_low exists). */
static inline unsigned long pte_val(pte_t x)
{
	return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low);
}
/* Extract the raw value from a pgd_t via pv_mmu_ops.pgd_val. */
static inline unsigned long pgd_val(pgd_t x)
{
	return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd);
}
/* Store pteval into *ptep through the hypervisor hook (non-PAE: low word). */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low);
}
/*
 * Store pteval into *ptep for (mm, addr); the mm/addr context is passed
 * through so the pv backend can act on it (non-PAE: low word only).
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low);
}
/* Store pmdval into *pmdp; unwrap the nested pud/pgd to the raw word. */
static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
}
#endif /* CONFIG_X86_PAE */
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,	/* no batching active */
	PARAVIRT_LAZY_MMU,	/* batching MMU (page-table) updates */
	PARAVIRT_LAZY_CPU,	/* batching CPU-state updates */
};
/* Lazy-mode state query and enter/leave transitions (defined out of line). */
enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
Jeremy Fitzhardinge
committed
static inline void arch_enter_lazy_cpu_mode(void)
{
PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
Jeremy Fitzhardinge
committed
}
/* End CPU-state batching via pv_cpu_ops.lazy_mode.leave. */
static inline void arch_leave_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}
/*
 * Flush pending batched CPU-state updates by leaving and re-entering
 * lazy CPU mode -- only when that mode is currently active.
 */
static inline void arch_flush_lazy_cpu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
		arch_leave_lazy_cpu_mode();
		arch_enter_lazy_cpu_mode();
	}
}
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
Jeremy Fitzhardinge
committed
static inline void arch_enter_lazy_mmu_mode(void)
{
PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
Jeremy Fitzhardinge
committed
}
/* End MMU batching via pv_mmu_ops.lazy_mode.leave. */
static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
/*
 * Flush pending batched MMU updates by leaving and re-entering lazy
 * MMU mode -- only when that mode is currently active.
 */
static inline void arch_flush_lazy_mmu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}
}
/* Do-nothing hook; the (void *) cast lets it fill any pv_ops slot. */
void _paravirt_nop(void);
#define paravirt_nop ((void *)_paravirt_nop)
/* These all sit in the .parainstructions section to tell us what to patch. */
Jeremy Fitzhardinge
committed
struct paravirt_patch_site {
u8 *instr; /* original instructions */
u8 instrtype; /* type of this instruction */
u8 len; /* length of original instruction */
u16 clobbers; /* what registers you may clobber */
};
Jeremy Fitzhardinge
committed
extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save some registers, but all of them, that's too much. We clobber all
* caller saved registers but the argument parameter */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx"
#define PV_FLAGS_ARG "D"
#endif
/*
 * Read the flags register via pv_irq_ops.save_fl.  The result arrives
 * in the "a" register; the callee may clobber only CLBR_EAX, so the
 * surrounding PV_SAVE/RESTORE_REGS preserve the rest.
 */
static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long f;

	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     : "=a"(f)
		     : paravirt_type(pv_irq_ops.save_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_VEXTRA_CLOBBERS);
	return f;
}
/*
 * Restore a flags word previously obtained from __raw_local_save_flags().
 * f is passed per PV_FLAGS_ARG ("0" = same register as the output on
 * 32-bit, "D" = %rdi on 64-bit); "=a"(f) marks the a-register modified.
 */
static inline void raw_local_irq_restore(unsigned long f)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     : "=a"(f)
		     : PV_FLAGS_ARG(f),
		       paravirt_type(pv_irq_ops.restore_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_EXTRA_CLOBBERS);
}
/* Disable interrupts via pv_irq_ops.irq_disable (patchable call site). */
static inline void raw_local_irq_disable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     :
		     : paravirt_type(pv_irq_ops.irq_disable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}
/* Enable interrupts via pv_irq_ops.irq_enable (patchable call site). */
static inline void raw_local_irq_enable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     :
		     : paravirt_type(pv_irq_ops.irq_enable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}
/* Save the current flags, then disable interrupts; return the saved flags. */
static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long f;

	f = __raw_local_save_flags();
	raw_local_irq_disable();
	return f;
}
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4
#else /* __ASSEMBLY__ */
/*
 * Emit asm "ops" bracketed by local labels 771/772 and record the span
 * in .parainstructions: address, patch type, length, clobber mask.
 */
#define _PVSITE(ptype, clobbers, ops, word, algn) \
771:; \
ops; \
772:; \
.pushsection .parainstructions,"a"; \
.align algn; \
word 771b; \
.byte ptype; \
.byte 772b-771b; \
.short clobbers; \
.popsection
#ifdef CONFIG_X86_64
/* Asm-side save/restore of caller-saved registers around pv calls (64-bit). */
#define PV_SAVE_REGS pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx
#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax
/* Index of a pv_ops member in pointer-sized (8-byte) units. */
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#else
/* 32-bit equivalents: 4-byte words. */
#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#endif
/* Patchable iret: indirect jump through pv_cpu_ops.iret. */
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp *%cs:pv_cpu_ops+PV_CPU_iret)
/* Patchable cli: call pv_irq_ops.irq_disable with registers preserved.
 * (Stray trailing backslash removed -- it would have spliced the next
 * source line into the macro.) */
#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers,	\
		  PV_SAVE_REGS;						\
		  call *%cs:pv_irq_ops+PV_IRQ_irq_disable;		\
		  PV_RESTORE_REGS;)
/* Patchable sti: call pv_irq_ops.irq_enable with registers preserved. */
#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS;						\
		  call *%cs:pv_irq_ops+PV_IRQ_irq_enable;		\
		  PV_RESTORE_REGS;)
/* Patchable syscall-return-with-irq-enable: jump through pv_cpu_ops. */
#define ENABLE_INTERRUPTS_SYSCALL_RET \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\
CLBR_NONE, \
jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)
#ifdef CONFIG_X86_32
/* Read CR0 into %eax, preserving %ecx/%edx around the pv call. */
#define GET_CR0_INTO_EAX			\
	push %ecx; push %edx;			\
	call *pv_cpu_ops+PV_CPU_read_cr0;	\
	pop %edx; pop %ecx
#define SWAPGS \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
PV_SAVE_REGS; \
call *pv_cpu_ops+PV_CPU_swapgs; \
PV_RESTORE_REGS \
)
#define GET_CR2_INTO_RCX \
call *pv_mmu_ops+PV_MMU_read_cr2; \
movq %rax, %rcx; \
xorq %rax, %rax;
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */