[Xen-devel] [PATCH RFC 43/44] x86/smp: Use the percpu TSS mapping
Construction of the TSS is the final action remaining in load_system_tables(),
so it is lifted into early_switch_to_idle() and load_system_tables() is removed
entirely. As a single global TSS is now in use, the per_cpu init_tss variable
is dropped.
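
(Illustrative sketch only, not part of the change itself: the lifted setup
boils down to pointing each CPU's regular and compat GDT slots at the single
global_tss and reloading TR.  The helper name below is hypothetical; all other
identifiers are those used in the diff.)

    /* Hypothetical helper, sketching what early_switch_to_idle() now does. */
    static void install_global_tss(struct desc_struct *gdt,
                                   struct desc_struct *compat_gdt)
    {
        /* Point both GDTs' TSS slots at the single, const global_tss. */
        _set_tssldt_desc(gdt + TSS_ENTRY, (unsigned long)&global_tss,
                         offsetof(struct tss_struct, __cacheline_filler) - 1,
                         SYS_DESC_tss_avail);
        _set_tssldt_desc(compat_gdt + TSS_ENTRY, (unsigned long)&global_tss,
                         offsetof(struct tss_struct, __cacheline_filler) - 1,
                         SYS_DESC_tss_busy);

        /* Load TR from the regular GDT; LTR marks that descriptor busy. */
        ltr(TSS_ENTRY << 3);
    }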
The value written to HOST_TR_BASE is now a constant (the fixed
PERCPU_TSS_MAPPING virtual address), so the write moves to construct_vmcs().
This means that vmx_set_host_env() and arch_vmx_struct.hostenv_migrated can be
dropped as well.
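
(Again purely as a sketch, using the identifiers from the diff: the host
system-table fields can all be written once at VMCS construction time, because
the structures they point at live at fixed per-CPU virtual addresses.)

    /* In construct_vmcs(): host tables at fixed per-CPU mappings. */
    __vmwrite(HOST_IDTR_BASE, PERCPU_IDT_MAPPING);
    __vmwrite(HOST_GDTR_BASE, PERCPU_GDT_MAPPING);
    __vmwrite(HOST_TR_BASE,   PERCPU_TSS_MAPPING);  /* added by this patch */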
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/cpu/common.c | 66 --------------------------------------
xen/arch/x86/hvm/vmx/vmcs.c | 22 +------------
xen/arch/x86/setup.c | 22 ++++++++++---
xen/arch/x86/smpboot.c | 6 ++--
xen/arch/x86/traps.c | 7 ++--
xen/include/asm-x86/hvm/vmx/vmcs.h | 1 -
xen/include/asm-x86/processor.h | 2 --
7 files changed, 23 insertions(+), 103 deletions(-)
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 262eccc..579d149 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -642,72 +642,6 @@ void __init early_cpu_init(void)
}
/*
- * Sets up system tables and descriptors.
- *
- * - Sets up TSS with stack pointers, including ISTs
- * - Inserts TSS selector into regular and compat GDTs
- * - Loads GDT, IDT, TR then null LDT
- * - Sets up IST references in the IDT
- */
-void load_system_tables(void)
-{
- unsigned long stack_bottom = get_stack_bottom(),
- stack_top = stack_bottom & ~(STACK_SIZE - 1);
-
- struct tss_struct *tss = &this_cpu(init_tss);
- struct desc_struct *gdt =
- this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY;
- struct desc_struct *compat_gdt =
- this_cpu(compat_gdt_table) - FIRST_RESERVED_GDT_ENTRY;
-
- *tss = (struct tss_struct){
- /* Main stack for interrupts/exceptions. */
- .rsp0 = stack_bottom,
-
- /* Ring 1 and 2 stacks poisoned. */
- .rsp1 = 0x8600111111111111ul,
- .rsp2 = 0x8600111111111111ul,
-
- /*
- * MCE, NMI and Double Fault handlers get their own stacks.
- * All others poisoned.
- */
- .ist = {
- [IST_MCE - 1] = stack_top + IST_MCE * PAGE_SIZE,
- [IST_DF - 1] = stack_top + IST_DF * PAGE_SIZE,
- [IST_NMI - 1] = stack_top + IST_NMI * PAGE_SIZE,
-
- [IST_MAX ... ARRAY_SIZE(tss->ist) - 1] =
- 0x8600111111111111ul,
- },
-
- .bitmap = IOBMP_INVALID_OFFSET,
- };
-
- _set_tssldt_desc(
- gdt + TSS_ENTRY,
- (unsigned long)tss,
- offsetof(struct tss_struct, __cacheline_filler) - 1,
- SYS_DESC_tss_avail);
- _set_tssldt_desc(
- compat_gdt + TSS_ENTRY,
- (unsigned long)tss,
- offsetof(struct tss_struct, __cacheline_filler) - 1,
- SYS_DESC_tss_busy);
-
- ltr(TSS_ENTRY << 3);
-
- /*
- * Bottom-of-stack must be 16-byte aligned!
- *
- * Defer checks until exception support is sufficiently set up.
- */
- BUILD_BUG_ON((sizeof(struct cpu_info) -
- offsetof(struct cpu_info, guest_cpu_user_regs.es)) & 0xf);
- BUG_ON(system_state != SYS_STATE_early_boot && (stack_bottom & 0xf));
-}
-
-/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT
* and IDT. We reload them nevertheless, this function acts as a
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 483f72d..93d979e 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -798,14 +798,6 @@ void vmx_vmcs_exit(struct vcpu *v)
}
}
-static void vmx_set_host_env(struct vcpu *v)
-{
- unsigned int cpu = smp_processor_id();
-
- __vmwrite(HOST_TR_BASE, (unsigned long)&per_cpu(init_tss, cpu));
-
-}
-
void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
enum vmx_msr_intercept_type type)
{
@@ -898,12 +890,6 @@ void vmx_vmcs_switch(paddr_t from, paddr_t to)
vmx->launched = 0;
this_cpu(current_vmcs) = to;
- if ( vmx->hostenv_migrated )
- {
- vmx->hostenv_migrated = 0;
- vmx_set_host_env(current);
- }
-
spin_unlock(&vmx->vmcs_lock);
}
@@ -1123,6 +1109,7 @@ static int construct_vmcs(struct vcpu *v)
/* Host system tables. */
__vmwrite(HOST_IDTR_BASE, PERCPU_IDT_MAPPING);
__vmwrite(HOST_GDTR_BASE, PERCPU_GDT_MAPPING);
+ __vmwrite(HOST_TR_BASE, PERCPU_TSS_MAPPING);
/* Host data selectors. */
__vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
@@ -1701,13 +1688,6 @@ void vmx_do_resume(struct vcpu *v)
vmx_load_vmcs(v);
hvm_migrate_timers(v);
hvm_migrate_pirqs(v);
- vmx_set_host_env(v);
- /*
- * Both n1 VMCS and n2 VMCS need to update the host environment after
- * VCPU migration. The environment of current VMCS is updated in place,
- * but the action of another VMCS is deferred till it is switched in.
- */
- v->arch.hvm_vmx.hostenv_migrated = 1;
hvm_asid_flush_vcpu(v);
}
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index c0f7289..3458ea6 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -90,8 +90,6 @@ unsigned long __read_mostly xen_phys_start;
unsigned long __read_mostly xen_virt_end;
-DEFINE_PER_CPU(struct tss_struct, init_tss);
-
char __section(".bss.stack_aligned") __aligned(STACK_SIZE)
cpu0_stack[STACK_SIZE];
@@ -258,6 +256,10 @@ void early_switch_to_idle(bool bsp)
.base = PERCPU_IDT_MAPPING,
.limit = 0xffff,
};
+ struct desc_struct *gdt =
+ this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY;
+ struct desc_struct *compat_gdt =
+ this_cpu(compat_gdt_table) - FIRST_RESERVED_GDT_ENTRY;
set_current(v);
per_cpu(curr_vcpu, cpu) = v;
@@ -267,8 +269,20 @@ void early_switch_to_idle(bool bsp)
per_cpu(curr_ptbase, cpu) = v->arch.cr3;
per_cpu(curr_extended_directmap, cpu) = true;
+ _set_tssldt_desc(
+ gdt + TSS_ENTRY,
+ (unsigned long)&global_tss,
+ offsetof(struct tss_struct, __cacheline_filler) - 1,
+ SYS_DESC_tss_avail);
+ _set_tssldt_desc(
+ compat_gdt + TSS_ENTRY,
+ (unsigned long)&global_tss,
+ offsetof(struct tss_struct, __cacheline_filler) - 1,
+ SYS_DESC_tss_busy);
+
lgdt(&gdtr);
lidt(&idtr);
+ ltr(TSS_ENTRY << 3);
lldt(0);
if ( likely(!bsp) ) /* BSP IST setup deferred. */
@@ -654,9 +668,7 @@ static void __init noreturn reinit_bsp_stack(void)
/* Sanity check that IST settings weren't set up before this point. */
ASSERT(MASK_EXTR(idt_tables[0][TRAP_nmi].a, 7UL << 32) == 0);
- /* Update TSS and ISTs */
- load_system_tables();
-
+ /* Enable BSP ISTs now we've switched stack. */
enable_each_ist(idt_tables[0]);
/* Update SYSCALL trampolines */
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index fa99e4d..69767e2 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -310,6 +310,8 @@ void start_secondary(void *unused)
early_switch_to_idle(false);
+ /* Full exception support from here on in. */
+
rdmsrl(MSR_EFER, this_cpu(efer));
/*
@@ -330,10 +332,6 @@ void start_secondary(void *unused)
*/
spin_debug_disable();
- load_system_tables();
-
- /* Full exception support from here on in. */
-
/* Safe to enable feature such as CR4.MCE with the IDT set up now. */
write_cr4(mmu_cr4_features);
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 0ab10ba..6b02a5f 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -551,7 +551,7 @@ void show_stack_overflow(unsigned int cpu, const struct cpu_user_regs *regs)
printk("Valid stack range: %p-%p, sp=%p, tss.rsp0=%p\n",
(void *)esp_top, (void *)esp_bottom, (void *)esp,
- (void *)per_cpu(init_tss, cpu).rsp0);
+ (void *)global_tss.rsp0);
/*
* Trigger overflow trace if %esp is anywhere within the guard page, or
@@ -1804,7 +1804,6 @@ static void __init set_intr_gate(unsigned int n, void *addr)
void load_TR(void)
{
- struct tss_struct *tss = &this_cpu(init_tss);
struct desc_ptr old_gdt, tss_gdt = {
.base = (long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY),
.limit = LAST_RESERVED_GDT_BYTE
@@ -1812,12 +1811,12 @@ void load_TR(void)
_set_tssldt_desc(
this_cpu(gdt_table) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
- (unsigned long)tss,
+ (unsigned long)&global_tss,
offsetof(struct tss_struct, __cacheline_filler) - 1,
SYS_DESC_tss_avail);
_set_tssldt_desc(
this_cpu(compat_gdt_table) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
- (unsigned long)tss,
+ (unsigned long)&global_tss,
offsetof(struct tss_struct, __cacheline_filler) - 1,
SYS_DESC_tss_busy);
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 8fb9e3c..c1bd468 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -157,7 +157,6 @@ struct arch_vmx_struct {
struct segment_register vm86_saved_seg[x86_seg_tr + 1];
/* Remember EFLAGS while in virtual 8086 mode */
uint32_t vm86_saved_eflags;
- int hostenv_migrated;
/* Bitmap to control vmexit policy for Non-root VMREAD/VMWRITE */
struct page_info *vmread_bitmap;
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 22882a6..2990afd 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -477,8 +477,6 @@ extern idt_entry_t *idt_tables[];
extern const struct tss_struct global_tss;
-DECLARE_PER_CPU(struct tss_struct, init_tss);
-
extern void init_int80_direct_trap(struct vcpu *v);
extern void do_write_ptbase(struct vcpu *v, bool tlb_maintenance);
--
2.1.4