Re: [Xen-devel] [PATCH] [v2] xen: Add FS and GS base to HVM VCPU context
On Tue, Apr 24, 2012 at 10:33 AM, Aravindh Puthiyaparambil
<aravindh@xxxxxxxxxxxx> wrote:
> On Tue, Apr 24, 2012 at 12:52 AM, Jan Beulich <JBeulich@xxxxxxxx> wrote:
>>>>> On 24.04.12 at 01:16, Aravindh Puthiyaparambil <aravindh@xxxxxxxxxxxx>
>>>>> wrote:
>>> Add FS and GS base to the HVM VCPU context returned by xc_vcpu_getcontext()
>>>
>>> Signed-off-by: Aravindh Puthiyaparambil <aravindh@xxxxxxxxxxxx>
>>>
>>> diff -r 6ef297a3761f -r f7a1633867bf xen/arch/x86/domctl.c
>>> --- a/xen/arch/x86/domctl.c Mon Apr 23 15:16:34 2012 -0700
>>> +++ b/xen/arch/x86/domctl.c Mon Apr 23 16:12:50 2012 -0700
>>> @@ -1590,8 +1590,17 @@ void arch_get_info_guest(struct vcpu *v,
>>>          c.nat->user_regs.es = sreg.sel;
>>>          hvm_get_segment_register(v, x86_seg_fs, &sreg);
>>>          c.nat->user_regs.fs = sreg.sel;
>>> +#ifdef __x86_64__
>>> +        c.nat->fs_base = sreg.base;
>>> +#endif
>>>          hvm_get_segment_register(v, x86_seg_gs, &sreg);
>>>          c.nat->user_regs.gs = sreg.sel;
>>> +#ifdef __x86_64__
>>> +        if ( ring_0(&c.nat->user_regs) )
>>> +            c.nat->gs_base_kernel = sreg.base;
>>> +        else
>>> +            c.nat->gs_base_user = sreg.base;
>>> +#endif
>>
>> Which still leaves one of gs_base_* unfilled in all cases. You'll need
>> to get ahold of vmcb->kerngsbase (AMD/SVM) or
>> v->arch.hvm_vmx.shadow_gs (Intel/VMX). You could either
>> introduce a new wrapper, or expose {svm,vmx}_save_cpu_state()
>> as a new function table entry, or pay the price of calling
>> ->save_cpu_ctxt(). But you will need to pay extra attention to
>> the case of the subject vCPU being current.
>
> OK, I will try introducing a new wrapper that will return
> vmcb->kerngsbase / v->arch.hvm_vmx.shadow_gs.
Jan,
How does this look?
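One part I am still unsure of is the subject-vCPU-being-current case you
mention: while the vCPU is running, v->arch.hvm_vmx.shadow_gs (and likewise
the VMCB copy of kerngsbase on the SVM side) may be stale. On VMX I imagine
something along these lines would be needed -- a sketch only, not part of
the patch below:

#ifdef __x86_64__
/*
 * Sketch: while v is the current vCPU, the guest's shadow GS base is
 * still live in MSR_SHADOW_GS_BASE; v->arch.hvm_vmx.shadow_gs is only
 * refreshed when the vCPU is scheduled out.
 */
static unsigned long vmx_get_shadow_gs_base(struct vcpu *v)
{
    unsigned long base;

    if ( v == current )
        rdmsrl(MSR_SHADOW_GS_BASE, base);  /* read the live MSR value */
    else
        base = v->arch.hvm_vmx.shadow_gs;  /* value saved at ctxt switch */

    return base;
}
#endif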
PS: Come submission time, I will send it out as two separate patches.
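For reference, consumption from the toolstack side would look roughly like
this -- a sketch with made-up domain/vCPU numbers, using the existing
xc_vcpu_getcontext() call:

#include <stdio.h>
#include <xenctrl.h>

/* Sketch: dump the FS/GS base values of vCPU 0 in domain 5. */
int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    vcpu_guest_context_any_t ctxt;

    if ( !xch )
        return 1;

    if ( xc_vcpu_getcontext(xch, 5 /* domid */, 0 /* vcpu */, &ctxt) == 0 )
        printf("fs_base=%#lx gs_base_kernel=%#lx gs_base_user=%#lx\n",
               (unsigned long)ctxt.x64.fs_base,
               (unsigned long)ctxt.x64.gs_base_kernel,
               (unsigned long)ctxt.x64.gs_base_user);

    xc_interface_close(xch);
    return 0;
}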
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1585,18 +1585,33 @@ void arch_get_info_guest(struct vcpu *v,
         hvm_get_segment_register(v, x86_seg_ss, &sreg);
         c.nat->user_regs.ss = sreg.sel;
         hvm_get_segment_register(v, x86_seg_ds, &sreg);
         c.nat->user_regs.ds = sreg.sel;
         hvm_get_segment_register(v, x86_seg_es, &sreg);
         c.nat->user_regs.es = sreg.sel;
         hvm_get_segment_register(v, x86_seg_fs, &sreg);
         c.nat->user_regs.fs = sreg.sel;
+#ifdef __x86_64__
+        c.nat->fs_base = sreg.base;
+#endif
         hvm_get_segment_register(v, x86_seg_gs, &sreg);
         c.nat->user_regs.gs = sreg.sel;
+#ifdef __x86_64__
+        if ( ring_0(&c.nat->user_regs) )
+        {
+            c.nat->gs_base_kernel = sreg.base;
+            c.nat->gs_base_user = hvm_get_shadow_gs_base(v);
+        }
+        else
+        {
+            c.nat->gs_base_user = sreg.base;
+            c.nat->gs_base_kernel = hvm_get_shadow_gs_base(v);
+        }
+#endif
     }
     else
     {
         c(ldt_base = v->arch.pv_vcpu.ldt_base);
         c(ldt_ents = v->arch.pv_vcpu.ldt_ents);
         for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames); ++i )
             c(gdt_frames[i] = v->arch.pv_vcpu.gdt_frames[i]);
 #ifdef CONFIG_COMPAT
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -640,16 +640,25 @@ static void svm_set_segment_register(str
     default:
         BUG();
     }
 
     if ( sync )
         svm_vmload(vmcb);
 }
 
+#ifdef __x86_64__
+static unsigned long svm_get_shadow_gs_base(struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    return vmcb->kerngsbase;
+}
+#endif
+
 static int svm_set_guest_pat(struct vcpu *v, u64 gpat)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     if ( !paging_mode_hap(v->domain) )
         return 0;
 
     vmcb_set_g_pat(vmcb, gpat);
@@ -1978,16 +1987,19 @@ static struct hvm_function_table __read_
     .vcpu_destroy         = svm_vcpu_destroy,
     .save_cpu_ctxt        = svm_save_vmcb_ctxt,
     .load_cpu_ctxt        = svm_load_vmcb_ctxt,
     .get_interrupt_shadow = svm_get_interrupt_shadow,
     .set_interrupt_shadow = svm_set_interrupt_shadow,
     .guest_x86_mode       = svm_guest_x86_mode,
     .get_segment_register = svm_get_segment_register,
     .set_segment_register = svm_set_segment_register,
+#ifdef __x86_64__
+    .get_shadow_gs_base   = svm_get_shadow_gs_base,
+#endif
     .update_host_cr3      = svm_update_host_cr3,
     .update_guest_cr      = svm_update_guest_cr,
     .update_guest_efer    = svm_update_guest_efer,
     .set_guest_pat        = svm_set_guest_pat,
     .get_guest_pat        = svm_get_guest_pat,
     .set_tsc_offset       = svm_set_tsc_offset,
     .inject_exception     = svm_inject_exception,
     .init_hypercall_page  = svm_init_hypercall_page,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -937,16 +937,23 @@ static void vmx_set_segment_register(str
         break;
     default:
         BUG();
     }
 
     vmx_vmcs_exit(v);
 }
 
+#ifdef __x86_64__
+static unsigned long vmx_get_shadow_gs_base(struct vcpu *v)
+{
+    return v->arch.hvm_vmx.shadow_gs;
+}
+#endif
+
 static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 {
     if ( !cpu_has_vmx_pat || !paging_mode_hap(v->domain) )
         return 0;
 
     vmx_vmcs_enter(v);
     __vmwrite(GUEST_PAT, gpat);
 #ifdef __i386__
@@ -1517,16 +1524,19 @@ static struct hvm_function_table __read_
     .vcpu_destroy         = vmx_vcpu_destroy,
     .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
     .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
     .get_interrupt_shadow = vmx_get_interrupt_shadow,
     .set_interrupt_shadow = vmx_set_interrupt_shadow,
     .guest_x86_mode       = vmx_guest_x86_mode,
     .get_segment_register = vmx_get_segment_register,
     .set_segment_register = vmx_set_segment_register,
+#ifdef __x86_64__
+    .get_shadow_gs_base   = vmx_get_shadow_gs_base,
+#endif
     .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr      = vmx_update_guest_cr,
     .update_guest_efer    = vmx_update_guest_efer,
     .set_guest_pat        = vmx_set_guest_pat,
     .get_guest_pat        = vmx_get_guest_pat,
     .set_tsc_offset       = vmx_set_tsc_offset,
     .inject_exception     = vmx_inject_exception,
     .init_hypercall_page  = vmx_init_hypercall_page,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -101,16 +101,19 @@ struct hvm_function_table {
     /* Examine specifics of the guest state. */
     unsigned int (*get_interrupt_shadow)(struct vcpu *v);
     void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
     int (*guest_x86_mode)(struct vcpu *v);
     void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
     void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
+#ifdef __x86_64__
+    unsigned long (*get_shadow_gs_base)(struct vcpu *v);
+#endif
 
     /*
      * Re-set the value of CR3 that Xen runs on when handling VM exits.
      */
     void (*update_host_cr3)(struct vcpu *v);
 
     /*
      * Called to inform HVM layer that a guest CRn or EFER has changed.
@@ -300,16 +303,23 @@ hvm_get_segment_register(struct vcpu *v,
 
 static inline void
 hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                          struct segment_register *reg)
 {
     hvm_funcs.set_segment_register(v, seg, reg);
 }
 
+#ifdef __x86_64__
+static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
+{
+    return hvm_funcs.get_shadow_gs_base(v);
+}
+#endif
+
 #define is_viridian_domain(_d) \
     (is_hvm_domain(_d) && ((_d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN]))
 
 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                unsigned int *ecx, unsigned int *edx);
 void hvm_migrate_timers(struct vcpu *v);
 void hvm_do_resume(struct vcpu *v);
 void hvm_migrate_pirqs(struct vcpu *v);