From: Alexander Graf <agraf@suse.de>
Date: Wed, 18 Nov 2009 00:27:59 +0100
Subject: Split paravirt ops by functionality
References: bnc#556135, FATE#306453
Patch-Mainline: Submitted to virtualization list

Currently, using paravirt ops is an all-or-nothing option. We can either use
pv-ops for CPU, MMU, timing, etc., or not at all.

Now there are some use cases where we don't need the full feature set, but
only a small chunk of it. KVM is a pretty prominent example of this.

So let's make everything a bit more fine-grained. We already have a split by
function groups, namely "cpu", "mmu", "time", "irq", "apic" and "spinlock".

Taking that existing split and extending it so that only the PV-capable bits
are compiled in sounded like a natural fit. That way we don't get performance
hits in MMU code from using the KVM PV clock, which only needs the TIME parts
of pv-ops.

We define a new CONFIG_PARAVIRT_ALL option that does basically the same thing
CONFIG_PARAVIRT did before this split. We move all users of CONFIG_PARAVIRT
over to CONFIG_PARAVIRT_ALL, so they behave the same way they did before.
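
As an illustration (not part of this patch; FOO_CLOCK is a made-up symbol),
a guest feature that only needs the paravirtualized clock can now pull in
just the TIME group instead of the full PARAVIRT_ALL, mirroring the
KVM_CLOCK change below:

config FOO_CLOCK
	bool "Foo paravirtualized clock"
	# PARAVIRT_CLOCK selects only PARAVIRT_TIME, so pv_cpu_ops and
	# pv_mmu_ops stay compiled out and the native code paths are used.
	select PARAVIRT_CLOCK
	---help---
	  Example of a clock-only guest feature that avoids pulling in
	  the rest of the pv-ops interface.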

So here it is - the splitting! I would have made the patch smaller, but this
was the closest I could get to atomic (for bisect) while staying sane.

Signed-off-by: Alexander Graf <agraf@suse.de>
---
 arch/x86/Kconfig | 47 +++++++++++++++++++++++++---
 arch/x86/include/asm/apic.h | 2 -
 arch/x86/include/asm/desc.h | 4 +-
 arch/x86/include/asm/fixmap.h | 2 -
 arch/x86/include/asm/io.h | 2 -
 arch/x86/include/asm/irqflags.h | 21 +++++++++---
 arch/x86/include/asm/mmu_context.h | 4 +-
 arch/x86/include/asm/msr.h | 4 +-
 arch/x86/include/asm/paravirt.h | 44 +++++++++++++++++++++++++-
 arch/x86/include/asm/paravirt_types.h | 12 +++++++
 arch/x86/include/asm/pgalloc.h | 2 -
 arch/x86/include/asm/pgtable-3level_types.h | 2 -
 arch/x86/include/asm/pgtable.h | 2 -
 arch/x86/include/asm/processor.h | 2 -
 arch/x86/include/asm/required-features.h | 2 -
 arch/x86/include/asm/smp.h | 2 -
 arch/x86/include/asm/system.h | 13 +++++--
 arch/x86/include/asm/tlbflush.h | 4 +-
 arch/x86/kernel/head_64.S | 2 -
 arch/x86/kernel/paravirt.c | 2 +
 arch/x86/kernel/tsc.c | 2 -
 arch/x86/kernel/vsmp_64.c | 2 -
 arch/x86/kernel/x8664_ksyms_64.c | 2 -
 arch/x86/xen/Kconfig | 2 -
 24 files changed, 146 insertions(+), 37 deletions(-)

--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -362,7 +362,7 @@ endif

config X86_VSMP
bool "ScaleMP vSMP"
- select PARAVIRT
+ select PARAVIRT_ALL
depends on X86_64 && PCI
depends on X86_EXTENDED_PLATFORM
---help---
@@ -510,7 +510,7 @@ source "arch/x86/xen/Kconfig"

config VMI
bool "VMI Guest support (DEPRECATED)"
- select PARAVIRT
+ select PARAVIRT_ALL
depends on X86_32
---help---
VMI provides a paravirtualized interface to the VMware ESX server
@@ -529,7 +529,6 @@ config VMI

config KVM_CLOCK
bool "KVM paravirtualized clock"
- select PARAVIRT
select PARAVIRT_CLOCK
---help---
Turning on this option will allow you to run a paravirtualized clock
@@ -540,7 +539,7 @@ config KVM_CLOCK

config KVM_GUEST
bool "KVM Guest support"
- select PARAVIRT
+ select PARAVIRT_ALL
---help---
This option enables various optimizations for running under the KVM
hypervisor.
@@ -568,8 +567,48 @@ config PARAVIRT_SPINLOCKS

If you are unsure how to answer this question, answer N.

+config PARAVIRT_CPU
+ bool
+ select PARAVIRT
+ default n
+
+config PARAVIRT_TIME
+ bool
+ select PARAVIRT
+ default n
+
+config PARAVIRT_IRQ
+ bool
+ select PARAVIRT
+ default n
+
+config PARAVIRT_APIC
+ bool
+ select PARAVIRT
+ default n
+
+config PARAVIRT_MMU
+ bool
+ select PARAVIRT
+ default n
+
+#
+# This is a placeholder to activate the old "include all pv-ops functionality"
+# behavior. If you're using this I'd recommend looking through your code to see
+# if you can be more specific. It probably saves you a few cycles!
+#
+config PARAVIRT_ALL
+ bool
+ select PARAVIRT_CPU
+ select PARAVIRT_TIME
+ select PARAVIRT_IRQ
+ select PARAVIRT_APIC
+ select PARAVIRT_MMU
+ default n
+
config PARAVIRT_CLOCK
bool
+ select PARAVIRT_TIME
default n

endif
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -81,7 +81,7 @@ static inline bool apic_from_smp_config(
/*
* Basic functions accessing APICs.
*/
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_APIC
#include <asm/paravirt.h>
#endif

--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -78,7 +78,7 @@ static inline int desc_empty(const void
return !(desc[0] | desc[1]);
}

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_CPU
#include <asm/paravirt.h>
#else
#define load_TR_desc() native_load_tr_desc()
@@ -108,7 +108,7 @@ static inline void paravirt_alloc_ldt(st
static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
-#endif /* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_CPU */

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))

--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -162,7 +162,7 @@ void __native_set_fixmap(enum fixed_addr
void native_set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);

-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_MMU
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -268,7 +268,7 @@ extern void native_io_delay(void);
extern int io_delay_type;
extern void io_delay_init(void);

-#if defined(CONFIG_PARAVIRT)
+#if defined(CONFIG_PARAVIRT_CPU)
#include <asm/paravirt.h>
#else

--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -58,9 +58,11 @@ static inline void native_halt(void)

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
-#else
+#endif
+
#ifndef __ASSEMBLY__

+#ifndef CONFIG_PARAVIRT_IRQ
static inline unsigned long __raw_local_save_flags(void)
{
return native_save_fl();
@@ -110,12 +112,17 @@ static inline unsigned long __raw_local_

return flags;
}
-#else
+#endif /* CONFIG_PARAVIRT_IRQ */
+
+#else /* __ASSEMBLY__ */

+#ifndef CONFIG_PARAVIRT_IRQ
#define ENABLE_INTERRUPTS(x) sti
#define DISABLE_INTERRUPTS(x) cli
+#endif /* !CONFIG_PARAVIRT_IRQ */

#ifdef CONFIG_X86_64
+#ifndef CONFIG_PARAVIRT_CPU
#define SWAPGS swapgs
/*
* Currently paravirt can't handle swapgs nicely when we
@@ -128,8 +135,6 @@ static inline unsigned long __raw_local_
*/
#define SWAPGS_UNSAFE_STACK swapgs

-#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
-
#define INTERRUPT_RETURN iretq
#define USERGS_SYSRET64 \
swapgs; \
@@ -141,16 +146,22 @@ static inline unsigned long __raw_local_
swapgs; \
sti; \
sysexit
+#endif /* !CONFIG_PARAVIRT_CPU */
+
+#ifndef CONFIG_PARAVIRT_IRQ
+#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
+#endif /* !CONFIG_PARAVIRT_IRQ */

#else
+#ifndef CONFIG_PARAVIRT_CPU
#define INTERRUPT_RETURN iret
#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
#define GET_CR0_INTO_EAX movl %cr0, %eax
+#endif /* !CONFIG_PARAVIRT_CPU */
#endif


#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#define raw_local_save_flags(flags) \
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -6,14 +6,14 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_MMU
#include <asm-generic/mm_hooks.h>

static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
}
-#endif /* !CONFIG_PARAVIRT */
+#endif /* !CONFIG_PARAVIRT_MMU */

/*
* Used for LDT copy/destruction.
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -135,7 +135,7 @@ static inline unsigned long long native_
return EAX_EDX_VAL(val, low, high);
}

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_CPU
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
@@ -246,7 +246,7 @@ do {

#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))

-#endif /* !CONFIG_PARAVIRT */
+#endif /* !CONFIG_PARAVIRT_CPU */


#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -18,6 +18,7 @@ static inline int paravirt_enabled(void)
return pv_info.paravirt_enabled;
}

+#ifdef CONFIG_PARAVIRT_CPU
static inline void load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
{
@@ -58,7 +59,9 @@ static inline void write_cr0(unsigned lo
{
PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}
+#endif /* CONFIG_PARAVIRT_CPU */

+#ifdef CONFIG_PARAVIRT_MMU
static inline unsigned long read_cr2(void)
{
return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
@@ -78,7 +81,9 @@ static inline void write_cr3(unsigned lo
{
PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}
+#endif /* CONFIG_PARAVIRT_MMU */

+#ifdef CONFIG_PARAVIRT_CPU
static inline unsigned long read_cr4(void)
{
return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
@@ -92,8 +97,9 @@ static inline void write_cr4(unsigned lo
{
PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}
+#endif /* CONFIG_PARAVIRT_CPU */

-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_CPU)
static inline unsigned long read_cr8(void)
{
return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
@@ -105,6 +111,7 @@ static inline void write_cr8(unsigned lo
}
#endif

+#ifdef CONFIG_PARAVIRT_IRQ
static inline void raw_safe_halt(void)
{
PVOP_VCALL0(pv_irq_ops.safe_halt);
@@ -114,14 +121,18 @@ static inline void halt(void)
{
PVOP_VCALL0(pv_irq_ops.safe_halt);
}
+#endif /* CONFIG_PARAVIRT_IRQ */

+#ifdef CONFIG_PARAVIRT_CPU
static inline void wbinvd(void)
{
PVOP_VCALL0(pv_cpu_ops.wbinvd);
}
+#endif

#define get_kernel_rpl() (pv_info.kernel_rpl)

+#ifdef CONFIG_PARAVIRT_CPU
static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
@@ -224,12 +235,16 @@ do { \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())
+#endif /* CONFIG_PARAVIRT_CPU */

+#ifdef CONFIG_PARAVIRT_TIME
static inline unsigned long long paravirt_sched_clock(void)
{
return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
+#endif /* CONFIG_PARAVIRT_TIME */

+#ifdef CONFIG_PARAVIRT_CPU
static inline unsigned long long paravirt_read_pmc(int counter)
{
return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
@@ -345,8 +360,9 @@ static inline void slow_down_io(void)
pv_cpu_ops.io_delay();
#endif
}
+#endif /* CONFIG_PARAVIRT_CPU */

-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_APIC)
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
unsigned long start_esp)
{
@@ -355,6 +371,7 @@ static inline void startup_ipi_hook(int
}
#endif

+#ifdef CONFIG_PARAVIRT_MMU
static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
@@ -689,7 +706,9 @@ static inline void pmd_clear(pmd_t *pmdp
set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */
+#endif /* CONFIG_PARAVIRT_MMU */

+#ifdef CONFIG_PARAVIRT_CPU
#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
@@ -700,7 +719,9 @@ static inline void arch_end_context_swit
{
PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}
+#endif /* CONFIG_PARAVIRT_CPU */

+#ifdef CONFIG_PARAVIRT_MMU
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
@@ -719,6 +740,7 @@ static inline void __set_fixmap(unsigned
{
pv_mmu_ops.set_fixmap(idx, phys, flags);
}
+#endif /* CONFIG_PARAVIRT_MMU */

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

@@ -829,6 +851,7 @@ static __always_inline void arch_spin_un
#define __PV_IS_CALLEE_SAVE(func) \
((struct paravirt_callee_save) { func })

+#ifdef CONFIG_PARAVIRT_IRQ
static inline unsigned long __raw_local_save_flags(void)
{
return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
@@ -857,6 +880,7 @@ static inline unsigned long __raw_local_
raw_local_irq_disable();
return f;
}
+#endif /* CONFIG_PARAVIRT_IRQ */


/* Make sure as little as possible of this mess escapes. */
@@ -939,10 +963,13 @@ extern void default_banner(void);
#define PARA_INDIRECT(addr) *%cs:addr
#endif

+#ifdef CONFIG_PARAVIRT_CPU
#define INTERRUPT_RETURN \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
+#endif /* CONFIG_PARAVIRT_CPU */

+#ifdef CONFIG_PARAVIRT_IRQ
#define DISABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
@@ -954,13 +981,17 @@ extern void default_banner(void);
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+#endif /* CONFIG_PARAVIRT_IRQ */

+#ifdef CONFIG_PARAVIRT_CPU
#define USERGS_SYSRET32 \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
+#endif /* CONFIG_PARAVIRT_CPU */

#ifdef CONFIG_X86_32
+#ifdef CONFIG_PARAVIRT_CPU
#define GET_CR0_INTO_EAX \
push %ecx; push %edx; \
call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
@@ -970,10 +1001,12 @@ extern void default_banner(void);
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
+#endif /* CONFIG_PARAVIRT_CPU */


#else /* !CONFIG_X86_32 */

+#ifdef CONFIG_PARAVIRT_CPU
/*
* If swapgs is used while the userspace stack is still current,
* there's no way to call a pvop. The PV replacement *must* be
@@ -993,17 +1026,23 @@ extern void default_banner(void);
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
)
+#endif /* CONFIG_PARAVIRT_CPU */

+#ifdef CONFIG_PARAVIRT_MMU
#define GET_CR2_INTO_RCX \
call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
movq %rax, %rcx; \
xorq %rax, %rax;
+#endif /* CONFIG_PARAVIRT_MMU */

+#ifdef CONFIG_PARAVIRT_IRQ
#define PARAVIRT_ADJUST_EXCEPTION_FRAME \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
CLBR_NONE, \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
+#endif /* CONFIG_PARAVIRT_IRQ */

+#ifdef CONFIG_PARAVIRT_CPU
#define USERGS_SYSRET64 \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
CLBR_NONE, \
@@ -1013,6 +1052,7 @@ extern void default_banner(void);
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
+#endif /* CONFIG_PARAVIRT_CPU */
#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -339,12 +339,24 @@ struct paravirt_patch_template {

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
+#ifdef CONFIG_PARAVIRT_TIME
extern struct pv_time_ops pv_time_ops;
+#endif
+#ifdef CONFIG_PARAVIRT_CPU
extern struct pv_cpu_ops pv_cpu_ops;
+#endif
+#ifdef CONFIG_PARAVIRT_IRQ
extern struct pv_irq_ops pv_irq_ops;
+#endif
+#ifdef CONFIG_PARAVIRT_APIC
extern struct pv_apic_ops pv_apic_ops;
+#endif
+#ifdef CONFIG_PARAVIRT_MMU
extern struct pv_mmu_ops pv_mmu_ops;
+#endif
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern struct pv_lock_ops pv_lock_ops;
+#endif

#define PARAVIRT_PATCH(x) \
(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -7,7 +7,7 @@

static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_MMU
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm)
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -18,7 +18,7 @@ typedef union {
} pte_t;
#endif /* !__ASSEMBLY__ */

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_MMU
#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
#else
#define SHARED_KERNEL_PMD 1
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -28,7 +28,7 @@ extern unsigned long empty_zero_page[PAG
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_MMU
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -573,7 +573,7 @@ static inline void native_swapgs(void)
#endif
}

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_CPU
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -48,7 +48,7 @@
#endif

#ifdef CONFIG_X86_64
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_MMU
/* Paravirtualized systems may not have PSE or PGE available */
#define NEED_PSE 0
#define NEED_PGE 0
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -66,7 +66,7 @@ struct smp_ops {
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_APIC
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
#endif
extern struct smp_ops smp_ops;
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -304,13 +304,18 @@ static inline void native_wbinvd(void)

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
-#else
-#define read_cr0() (native_read_cr0())
-#define write_cr0(x) (native_write_cr0(x))
+#endif/* CONFIG_PARAVIRT */
+
+#ifndef CONFIG_PARAVIRT_MMU
#define read_cr2() (native_read_cr2())
#define write_cr2(x) (native_write_cr2(x))
#define read_cr3() (native_read_cr3())
#define write_cr3(x) (native_write_cr3(x))
+#endif /* CONFIG_PARAVIRT_MMU */
+
+#ifndef CONFIG_PARAVIRT_CPU
+#define read_cr0() (native_read_cr0())
+#define write_cr0(x) (native_write_cr0(x))
#define read_cr4() (native_read_cr4())
#define read_cr4_safe() (native_read_cr4_safe())
#define write_cr4(x) (native_write_cr4(x))
@@ -324,7 +329,7 @@ static inline void native_wbinvd(void)
/* Clear the 'TS' bit */
#define clts() (native_clts())

-#endif/* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_CPU */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)

--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -7,7 +7,7 @@
#include <asm/processor.h>
#include <asm/system.h>

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_MMU
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
@@ -162,7 +162,7 @@ static inline void reset_lazy_tlbstate(v

#endif /* SMP */

-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_MMU
#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
#endif

--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -20,7 +20,7 @@
#include <asm/processor-flags.h>
#include <asm/percpu.h>

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_MMU
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -155,12 +155,14 @@ unsigned paravirt_patch_default(u8 type,
else if (opfunc == _paravirt_ident_64)
ret = paravirt_patch_ident_64(insnbuf, len);

+#ifdef CONFIG_PARAVIRT_CPU
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
/* If operation requires a jmp, then jmp */
ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
+#endif
else
/* Otherwise call the function; assume target could
clobber any caller-save reg */
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -66,7 +66,7 @@ u64 native_sched_clock(void)

/* We need to define a real function for sched_clock, to override the
weak default version */
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_TIME
unsigned long long sched_clock(void)
{
return paravirt_sched_clock();
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -22,7 +22,7 @@
#include <asm/paravirt.h>
#include <asm/setup.h>

-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
+#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_IRQ
/*
* Interrupt control on vSMPowered systems:
* ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -55,6 +55,6 @@ EXPORT_SYMBOL(__memcpy);

EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(init_level4_pgt);
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_CPU
EXPORT_SYMBOL(native_load_gs_index);
#endif
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -4,7 +4,7 @@

config XEN
bool "Xen guest support"
- select PARAVIRT
+ select PARAVIRT_ALL
select PARAVIRT_CLOCK
depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
depends on X86_CMPXCHG && X86_TSC