static inline pteval_t pte_flags(pte_t pte)
{
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
pte.pte, (u64)pte.pte >> 32);
else
ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
pte.pte);
return ret;
}
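/*
 * The page-table value helpers here all follow the same pattern: when the
 * value type is wider than a machine word (32-bit PAE, where pte/pmd/pgd
 * values are 64-bit), the value is passed to the pv op as two 32-bit
 * halves; otherwise a single-word call is used.
 */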
static inline pgd_t __pgd(pgdval_t val)
{
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
val);
return (pgd_t) { ret };
}
static inline pgdval_t pgd_val(pgd_t pgd)
{
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
pgd.pgd, (u64)pgd.pgd >> 32);
else
ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
pgd.pgd);
return ret;
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
if (sizeof(pteval_t) > sizeof(long))
PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
pte.pte, (u64)pte.pte >> 32);
else
PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
pte.pte);
}
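/*
 * With a 64-bit pteval_t on 32-bit (PAE), mm + addr + ptep + the two pte
 * halves add up to five argument words, which is more than the PVOP_*CALL
 * wrappers handle, so the op is called directly through the pointer.
 */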
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
if (sizeof(pteval_t) > sizeof(long))
/* 5 arg words */
pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
else
PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
pmdval_t val = native_pmd_val(pmd);
if (sizeof(pmdval_t) > sizeof(long))
PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
else
PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}
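/*
 * Helpers for the upper page-table levels; these only exist when the build
 * has three or more paging levels (PAE or 64-bit).
 */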
#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
pmdval_t ret;
if (sizeof(pmdval_t) > sizeof(long))
ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
val);
return (pmd_t) { ret };
}
static inline pmdval_t pmd_val(pmd_t pmd)
{
pmdval_t ret;
if (sizeof(pmdval_t) > sizeof(long))
ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
pmd.pmd, (u64)pmd.pmd >> 32);
else
ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
pmd.pmd);
return ret;
}
static inline void set_pud(pud_t *pudp, pud_t pud)
{
pudval_t val = native_pud_val(pud);
if (sizeof(pudval_t) > sizeof(long))
PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
val, (u64)val >> 32);
else
PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
val);
}
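/*
 * Four-level paging (64-bit only): pud and pgd are separate levels with
 * their own conversion, set and clear helpers.
 */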
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
pudval_t ret;
if (sizeof(pudval_t) > sizeof(long))
ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
val);
return (pud_t) { ret };
}
static inline pudval_t pud_val(pud_t pud)
{
pudval_t ret;
if (sizeof(pudval_t) > sizeof(long))
ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
pud.pud, (u64)pud.pud >> 32);
else
ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
pud.pud);
return ret;
}
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
pgdval_t val = native_pgd_val(pgd);
if (sizeof(pgdval_t) > sizeof(long))
PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
val, (u64)val >> 32);
else
PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
val);
}
static inline void pgd_clear(pgd_t *pgdp)
{
set_pgd(pgdp, __pgd(0));
}
static inline void pud_clear(pud_t *pudp)
{
set_pud(pudp, __pud(0));
}
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
pte.pte, pte.pte >> 32);
}
static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
/* 5 arg words */
pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}
static inline void pmd_clear(pmd_t *pmdp)
{
PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else /* !CONFIG_X86_PAE */
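/* Without PAE a pte fits in one machine word, so plain set_pte/set_pmd
   already update the entry atomically and these are simple wrappers. */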
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
set_pte(ptep, pte);
}
static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte(ptep, pte);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
set_pte_at(mm, addr, ptep, __pte(0));
}
static inline void pmd_clear(pmd_t *pmdp)
{
set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
PARAVIRT_LAZY_NONE,
PARAVIRT_LAZY_MMU,
PARAVIRT_LAZY_CPU,
};
enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}
static inline void arch_leave_lazy_cpu_mode(void)
{
PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}
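/* "Flushing" lazy mode just leaves and immediately re-enters it, which
   forces any batched operations out to the hypervisor; likewise for the
   MMU variant below. */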
static inline void arch_flush_lazy_cpu_mode(void)
{
if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
arch_leave_lazy_cpu_mode();
arch_enter_lazy_cpu_mode();
}
}
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}
static inline void arch_leave_lazy_mmu_mode(void)
{
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
static inline void arch_flush_lazy_mmu_mode(void)
{
if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
arch_leave_lazy_mmu_mode();
arch_enter_lazy_mmu_mode();
}
}
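/* _paravirt_nop is an empty function; the cast lets it be assigned to any
   pv op slot that should do nothing on native hardware. */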
void _paravirt_nop(void);
#define paravirt_nop ((void *)_paravirt_nop)
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
u8 *instr; /* original instructions */
u8 instrtype; /* type of this instruction */
u8 len; /* length of original instruction */
u16 clobbers; /* what registers you may clobber */
};
extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save some registers, but not all of them - that would be too much.
 * We clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx"
#define PV_FLAGS_ARG "D"
#endif
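/*
 * The irq-flag helpers below open-code the PVOP pattern as inline asm:
 * paravirt_alt() wraps an indirect call through the pv_irq_ops entry and
 * records a patch site, so at boot the call can be replaced (typically
 * with the native instruction sequence, e.g. pushf/pop for save_fl).
 */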
static inline unsigned long __raw_local_save_flags(void)
{
unsigned long f;
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
: "=a"(f)
: paravirt_type(pv_irq_ops.save_fl),
paravirt_clobber(CLBR_EAX)
: "memory", "cc" PV_VEXTRA_CLOBBERS);
return f;
}
static inline void raw_local_irq_restore(unsigned long f)
{
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
: "=a"(f)
: PV_FLAGS_ARG(f),
paravirt_type(pv_irq_ops.restore_fl),
paravirt_clobber(CLBR_EAX)
: "memory", "cc" PV_EXTRA_CLOBBERS);
}
static inline void raw_local_irq_disable(void)
{
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
:
: paravirt_type(pv_irq_ops.irq_disable),
paravirt_clobber(CLBR_EAX)
: "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}
static inline void raw_local_irq_enable(void)
{
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
:
: paravirt_type(pv_irq_ops.irq_enable),
paravirt_clobber(CLBR_EAX)
: "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long f;
f = __raw_local_save_flags();
raw_local_irq_disable();
return f;
}
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4
#else /* __ASSEMBLY__ */
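/* Assembly version of the patch-site machinery: the 771/772 local labels
   bracket the default ops, and a .parainstructions entry records their
   address, type, length and clobbers for boot-time patching. */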
#define _PVSITE(ptype, clobbers, ops, word, algn) \
771:; \
ops; \
772:; \
.pushsection .parainstructions,"a"; \
.align algn; \
word 771b; \
.byte ptype; \
.byte 772b-771b; \
.short clobbers; \
.popsection
#ifdef CONFIG_X86_64
#define PV_SAVE_REGS pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx
#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#else
#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#endif
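/* These macros are used from the assembly entry code; each expands to a
   patchable indirect call (or jump) through the corresponding pv op. */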
#define INTERRUPT_RETURN \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
jmp *%cs:pv_cpu_ops+PV_CPU_iret)
#define DISABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
PV_SAVE_REGS; \
call *%cs:pv_irq_ops+PV_IRQ_irq_disable; \
PV_RESTORE_REGS;) \
#define ENABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
PV_SAVE_REGS; \
call *%cs:pv_irq_ops+PV_IRQ_irq_enable; \
PV_RESTORE_REGS;)
#define ENABLE_INTERRUPTS_SYSCALL_RET \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\
CLBR_NONE, \
jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)
#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX \
push %ecx; push %edx; \
call *pv_cpu_ops+PV_CPU_read_cr0; \
pop %edx; pop %ecx
#else /* !CONFIG_X86_32 */
#define SWAPGS \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
PV_SAVE_REGS; \
call *pv_cpu_ops+PV_CPU_swapgs; \
PV_RESTORE_REGS \
)
#define GET_CR2_INTO_RCX \
call *pv_mmu_ops+PV_MMU_read_cr2; \
movq %rax, %rcx; \
xorq %rax, %rax;
#endif /* !CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */