[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 6/8] x86/pv: Store the data segment selectors outside of cpu_user_regs
In order to support FRED, we're going to have to remove the {ds..gs} fields from struct cpu_user_regs. This will impact v->arch.user_regs. These fields are unused for HVM guests, but for PV hold the selector values when the vCPU is scheduled out. Introduce new fields for the selectors in struct pv_vcpu, and update: * {save,load}_segments(), context switching * arch_{set,get}_info_guest(), hypercalls * vcpu_show_registers(), diagnostics * dom0_construct(), PV dom0 to use the new storage. This removes the final user of read_sregs() so drop it too. Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> --- CC: Jan Beulich <JBeulich@xxxxxxxx> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx> --- xen/arch/x86/domain.c | 70 +++++++++++++++---------------- xen/arch/x86/domctl.c | 16 +++---- xen/arch/x86/include/asm/domain.h | 2 + xen/arch/x86/include/asm/regs.h | 8 ---- xen/arch/x86/pv/dom0_build.c | 6 ++- xen/arch/x86/x86_64/traps.c | 8 ++-- 6 files changed, 53 insertions(+), 57 deletions(-) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index bc0816c71495..e9c331be6f63 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -1211,10 +1211,10 @@ int arch_set_info_guest( v->arch.user_regs.rflags = c.nat->user_regs.rflags; v->arch.user_regs.rsp = c.nat->user_regs.rsp; v->arch.user_regs.ss = c.nat->user_regs.ss; - v->arch.user_regs.es = c.nat->user_regs.es; - v->arch.user_regs.ds = c.nat->user_regs.ds; - v->arch.user_regs.fs = c.nat->user_regs.fs; - v->arch.user_regs.gs = c.nat->user_regs.gs; + v->arch.pv.es = c.nat->user_regs.es; + v->arch.pv.ds = c.nat->user_regs.ds; + v->arch.pv.fs = c.nat->user_regs.fs; + v->arch.pv.gs = c.nat->user_regs.gs; if ( is_pv_domain(d) ) memcpy(v->arch.pv.trap_ctxt, c.nat->trap_ctxt, @@ -1238,10 +1238,10 @@ int arch_set_info_guest( v->arch.user_regs.eflags = c.cmp->user_regs.eflags; v->arch.user_regs.esp = c.cmp->user_regs.esp; v->arch.user_regs.ss = c.cmp->user_regs.ss; - v->arch.user_regs.es = c.cmp->user_regs.es; - 
v->arch.user_regs.ds = c.cmp->user_regs.ds; - v->arch.user_regs.fs = c.cmp->user_regs.fs; - v->arch.user_regs.gs = c.cmp->user_regs.gs; + v->arch.pv.es = c.cmp->user_regs.es; + v->arch.pv.ds = c.cmp->user_regs.ds; + v->arch.pv.fs = c.cmp->user_regs.fs; + v->arch.pv.gs = c.cmp->user_regs.gs; if ( is_pv_domain(d) ) { @@ -1729,7 +1729,6 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) */ static void load_segments(struct vcpu *n) { - struct cpu_user_regs *uregs = &n->arch.user_regs; unsigned long gsb = 0, gss = 0; bool compat = is_pv_32bit_vcpu(n); bool all_segs_okay = true, fs_gs_done = false; @@ -1762,7 +1761,7 @@ static void load_segments(struct vcpu *n) if ( !(n->arch.flags & TF_kernel_mode) ) SWAP(gsb, gss); - if ( using_svm() && (uregs->fs | uregs->gs) <= 3 ) + if ( using_svm() && (n->arch.pv.fs | n->arch.pv.gs) <= 3 ) fs_gs_done = svm_load_segs(n->arch.pv.ldt_ents, LDT_VIRT_START(n), n->arch.pv.fs_base, gsb, gss); } @@ -1771,12 -1770,12 @@ static void load_segments(struct vcpu *n) { load_LDT(n); - TRY_LOAD_SEG(fs, uregs->fs); - TRY_LOAD_SEG(gs, uregs->gs); + TRY_LOAD_SEG(fs, n->arch.pv.fs); + TRY_LOAD_SEG(gs, n->arch.pv.gs); } - TRY_LOAD_SEG(ds, uregs->ds); - TRY_LOAD_SEG(es, uregs->es); + TRY_LOAD_SEG(ds, n->arch.pv.ds); + TRY_LOAD_SEG(es, n->arch.pv.es); if ( !fs_gs_done && !compat ) { @@ -1829,13 -1828,13 @@ } if ( ret | - put_guest(rflags, esp - 1) | - put_guest(cs_and_mask, esp - 2) | - put_guest(regs->eip, esp - 3) | - put_guest(uregs->gs, esp - 4) | - put_guest(uregs->fs, esp - 5) | - put_guest(uregs->es, esp - 6) | - put_guest(uregs->ds, esp - 7) ) + put_guest(rflags, esp - 1) | + put_guest(cs_and_mask, esp - 2) | + put_guest(regs->eip, esp - 3) | + put_guest(n->arch.pv.gs, esp - 4) | + put_guest(n->arch.pv.fs, esp - 5) | + put_guest(n->arch.pv.es, esp - 6) | + put_guest(n->arch.pv.ds, esp - 7) ) domain_crash(n->domain, "Error creating compat failsafe callback frame\n"); @@
-1861,17 +1860,17 @@ static void load_segments(struct vcpu *n) cs_and_mask = (unsigned long)regs->cs | ((unsigned long)vcpu_info(n, evtchn_upcall_mask) << 32); - if ( put_guest(regs->ss, rsp - 1) | - put_guest(regs->rsp, rsp - 2) | - put_guest(rflags, rsp - 3) | - put_guest(cs_and_mask, rsp - 4) | - put_guest(regs->rip, rsp - 5) | - put_guest(uregs->gs, rsp - 6) | - put_guest(uregs->fs, rsp - 7) | - put_guest(uregs->es, rsp - 8) | - put_guest(uregs->ds, rsp - 9) | - put_guest(regs->r11, rsp - 10) | - put_guest(regs->rcx, rsp - 11) ) + if ( put_guest(regs->ss, rsp - 1) | + put_guest(regs->rsp, rsp - 2) | + put_guest(rflags, rsp - 3) | + put_guest(cs_and_mask, rsp - 4) | + put_guest(regs->rip, rsp - 5) | + put_guest(n->arch.pv.gs, rsp - 6) | + put_guest(n->arch.pv.fs, rsp - 7) | + put_guest(n->arch.pv.es, rsp - 8) | + put_guest(n->arch.pv.ds, rsp - 9) | + put_guest(regs->r11, rsp - 10) | + put_guest(regs->rcx, rsp - 11) ) domain_crash(n->domain, "Error creating failsafe callback frame\n"); @@ -1900,9 +1899,10 @@ static void load_segments(struct vcpu *n) */ static void save_segments(struct vcpu *v) { - struct cpu_user_regs *regs = &v->arch.user_regs; - - read_sregs(regs); + asm ( "mov %%ds, %0" : "=m" (v->arch.pv.ds) ); + asm ( "mov %%es, %0" : "=m" (v->arch.pv.es) ); + asm ( "mov %%fs, %0" : "=m" (v->arch.pv.fs) ); + asm ( "mov %%gs, %0" : "=m" (v->arch.pv.gs) ); if ( !is_pv_32bit_vcpu(v) ) { diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c index 7ab9e9176b58..833fcbd4bbb6 100644 --- a/xen/arch/x86/domctl.c +++ b/xen/arch/x86/domctl.c @@ -1414,10 +1414,10 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) c.nat->user_regs.rflags = v->arch.user_regs.rflags; c.nat->user_regs.rsp = v->arch.user_regs.rsp; c.nat->user_regs.ss = v->arch.user_regs.ss; - c.nat->user_regs.es = v->arch.user_regs.es; - c.nat->user_regs.ds = v->arch.user_regs.ds; - c.nat->user_regs.fs = v->arch.user_regs.fs; - c.nat->user_regs.gs = v->arch.user_regs.gs; + 
c.nat->user_regs.es = v->arch.pv.es; + c.nat->user_regs.ds = v->arch.pv.ds; + c.nat->user_regs.fs = v->arch.pv.fs; + c.nat->user_regs.gs = v->arch.pv.gs; if ( is_pv_domain(d) ) memcpy(c.nat->trap_ctxt, v->arch.pv.trap_ctxt, @@ -1441,10 +1441,10 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) c.cmp->user_regs.eflags = v->arch.user_regs.eflags; c.cmp->user_regs.esp = v->arch.user_regs.esp; c.cmp->user_regs.ss = v->arch.user_regs.ss; - c.cmp->user_regs.es = v->arch.user_regs.es; - c.cmp->user_regs.ds = v->arch.user_regs.ds; - c.cmp->user_regs.fs = v->arch.user_regs.fs; - c.cmp->user_regs.gs = v->arch.user_regs.gs; + c.cmp->user_regs.es = v->arch.pv.es; + c.cmp->user_regs.ds = v->arch.pv.ds; + c.cmp->user_regs.fs = v->arch.pv.fs; + c.cmp->user_regs.gs = v->arch.pv.gs; if ( is_pv_domain(d) ) { diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h index 5fc1d1e5d01a..7fa409cb3055 100644 --- a/xen/arch/x86/include/asm/domain.h +++ b/xen/arch/x86/include/asm/domain.h @@ -546,6 +546,8 @@ struct pv_vcpu bool syscall32_disables_events; bool sysenter_disables_events; + uint16_t ds, es, fs, gs; + /* * 64bit segment bases. 
* diff --git a/xen/arch/x86/include/asm/regs.h b/xen/arch/x86/include/asm/regs.h index 4f2f06b60161..c05b9207c281 100644 --- a/xen/arch/x86/include/asm/regs.h +++ b/xen/arch/x86/include/asm/regs.h @@ -41,12 +41,4 @@ __sel; \ }) -static inline void read_sregs(struct cpu_user_regs *regs) -{ - asm ( "mov %%ds, %0" : "=m" (regs->ds) ); - asm ( "mov %%es, %0" : "=m" (regs->es) ); - asm ( "mov %%fs, %0" : "=m" (regs->fs) ); - asm ( "mov %%gs, %0" : "=m" (regs->gs) ); -} - #endif /* __X86_REGS_H__ */ diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c index 96e28c7b6a77..bcaacc7586c0 100644 --- a/xen/arch/x86/pv/dom0_build.c +++ b/xen/arch/x86/pv/dom0_build.c @@ -1020,8 +1020,10 @@ static int __init dom0_construct(struct boot_info *bi, struct domain *d) * [rAX,rBX,rCX,rDX,rDI,rBP,R8-R15 are zero] */ regs = &v->arch.user_regs; - regs->ds = regs->es = regs->fs = regs->gs = - (compat ? FLAT_COMPAT_KERNEL_DS : FLAT_KERNEL_DS); + v->arch.pv.ds = (compat ? FLAT_COMPAT_KERNEL_DS : FLAT_KERNEL_DS); + v->arch.pv.es = (compat ? FLAT_COMPAT_KERNEL_DS : FLAT_KERNEL_DS); + v->arch.pv.fs = (compat ? FLAT_COMPAT_KERNEL_DS : FLAT_KERNEL_DS); + v->arch.pv.gs = (compat ? FLAT_COMPAT_KERNEL_DS : FLAT_KERNEL_DS); regs->ss = (compat ? FLAT_COMPAT_KERNEL_SS : FLAT_KERNEL_SS); regs->cs = (compat ? FLAT_COMPAT_KERNEL_CS : FLAT_KERNEL_CS); regs->rip = parms.virt_entry; diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c index 23622cdb1440..cb06f99021d1 100644 --- a/xen/arch/x86/x86_64/traps.c +++ b/xen/arch/x86/x86_64/traps.c @@ -219,10 +219,10 @@ void vcpu_show_registers(struct vcpu *v) state.gsb = gsb; state.gss = gss; - state.ds = v->arch.user_regs.ds; - state.es = v->arch.user_regs.es; - state.fs = v->arch.user_regs.fs; - state.gs = v->arch.user_regs.gs; + state.ds = v->arch.pv.ds; + state.es = v->arch.pv.es; + state.fs = v->arch.pv.fs; + state.gs = v->arch.pv.gs; context = CTXT_pv_guest; } -- 2.39.5
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |