[Xen-devel] [PATCH RFC v12 07/21] pvh: Disable unneeded features of HVM containers
Things kept:
 * cacheattr_region lists
 * irq-related structures
 * paging
 * tm_list

Things disabled for now:
 * compat xlation

Things disabled:
 * Emulated timers and clock sources
 * IO/MMIO emulation
 * msix tables
 * hvm params
 * hvm_funcs
 * nested HVM
 * Fast-path for emulated lapic accesses

Getting rid of the hvm_params struct required a couple other places to
check for its existence before attempting to read the params.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
CC: Jan Beulich <jan.beulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c      | 37 ++++++++++++++++++++++++++++++++++---
 xen/arch/x86/hvm/io.c       |  4 ++++
 xen/arch/x86/hvm/irq.c      |  3 +++
 xen/arch/x86/hvm/mtrr.c     |  3 ++-
 xen/arch/x86/hvm/vmx/intr.c |  3 ++-
 xen/arch/x86/hvm/vmx/vmcs.c |  5 +++--
 xen/arch/x86/hvm/vmx/vmx.c  | 10 ++++++++--
 7 files changed, 56 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1764b78..6a7a006 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -301,6 +301,10 @@ u64 hvm_get_guest_tsc_adjust(struct vcpu *v)
 
 void hvm_migrate_timers(struct vcpu *v)
 {
+    /* PVH doesn't use rtc and emulated timers, it uses pvclock mechanism. */
+    if ( is_pvh_vcpu(v) )
+        return;
+
     rtc_migrate_timers(v);
     pt_migrate(v);
 }
@@ -342,10 +346,13 @@ void hvm_do_resume(struct vcpu *v)
 {
     ioreq_t *p;
 
-    pt_restore_timer(v);
-
     check_wakeup_from_wait();
 
+    if ( is_pvh_vcpu(v) )
+        goto check_inject_trap;
+
+    pt_restore_timer(v);
+
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
     p = get_ioreq(v);
     while ( p->state != STATE_IOREQ_NONE )
@@ -368,6 +375,7 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+ check_inject_trap:
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
     {
@@ -521,6 +529,7 @@ int hvm_domain_initialise(struct domain *d)
         return -EINVAL;
     }
 
+    /* PVH: pbut_lock and uc_lock unused, but won't hurt */
     spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
     spin_lock_init(&d->arch.hvm_domain.uc_lock);
@@ -531,6 +540,9 @@ int hvm_domain_initialise(struct domain *d)
     if ( rc != 0 )
         goto fail0;
 
+    if ( is_pvh_domain(d) )
+        return 0;
+
     INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
     spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
 
@@ -584,6 +596,9 @@ int hvm_domain_initialise(struct domain *d)
 
 void hvm_domain_relinquish_resources(struct domain *d)
 {
+    if ( is_pvh_domain(d) )
+        return;
+
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         hvm_funcs.nhvm_domain_relinquish_resources(d);
 
@@ -609,6 +624,10 @@ void hvm_domain_relinquish_resources(struct domain *d)
 void hvm_domain_destroy(struct domain *d)
 {
     hvm_destroy_cacheattr_region_list(d);
+
+    if ( is_pvh_domain(d) )
+        return;
+
     hvm_funcs.domain_destroy(d);
     rtc_deinit(d);
     stdvga_deinit(d);
@@ -1093,6 +1112,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
     v->arch.hvm_vcpu.inject_trap.vector = -1;
 
+    if ( is_pvh_vcpu(v) )
+    {
+        v->arch.hvm_vcpu.hcall_64bit = 1;    /* PVH 32bitfixme. */
+        /* This for hvm_long_mode_enabled(v). */
+        v->arch.hvm_vcpu.guest_efer = EFER_SCE | EFER_LMA | EFER_LME;
+        return 0;
+    }
+
     rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
     if ( rc != 0 )
         goto fail3;
@@ -1168,7 +1195,10 @@ void hvm_vcpu_destroy(struct vcpu *v)
 
     tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
     hvm_vcpu_cacheattr_destroy(v);
-    vlapic_destroy(v);
+
+    if ( is_hvm_vcpu(v) )
+        vlapic_destroy(v);
+
     hvm_funcs.vcpu_destroy(v);
 
     /* Event channel is already freed by evtchn_destroy(). */
@@ -1369,6 +1399,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
     /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
      * a fast path for LAPIC accesses, skipping the p2m lookup. */
     if ( !nestedhvm_vcpu_in_guestmode(v)
+         && is_hvm_vcpu(v)
          && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
     {
         if ( !handle_mmio() )
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 4ae2c0c..3af4b34 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -175,6 +175,10 @@ int handle_mmio(void)
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     int rc;
 
+    /* No MMIO for PVH vcpus */
+    if ( is_pvh_vcpu(curr) )
+        return 0;
+
     hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
 
     rc = hvm_emulate_one(&ctxt);
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 9eae5de..92fb245 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -405,6 +405,9 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
          && vcpu_info(v, evtchn_upcall_pending) )
         return hvm_intack_vector(plat->irq.callback_via.vector);
 
+    if ( is_pvh_vcpu(v) )
+        return hvm_intack_none;
+
     if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
         return hvm_intack_pic(0);
 
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index ef51a8d..df888a6 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -693,7 +693,8 @@ uint8_t epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
          ((d->vcpu == NULL) || ((v = d->vcpu[0]) == NULL)) )
         return MTRR_TYPE_WRBACK;
 
-    if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
+    if ( v->domain->arch.hvm_domain.params
+         && !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
         return MTRR_TYPE_WRBACK;
 
     if ( (v == current) && v->domain->arch.hvm_domain.is_in_uc_mode )
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index e376f3c..5a7a62e 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -220,7 +220,8 @@ void vmx_intr_assist(void)
     }
 
     /* Crank the handle on interrupt state. */
-    pt_vector = pt_update_irq(v);
+    if ( is_hvm_vcpu(v) )
+        pt_vector = pt_update_irq(v);
 
     do {
         intack = hvm_vcpu_has_pending_irq(v);
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 7087630..fa90493 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1295,8 +1295,9 @@ void vmx_do_resume(struct vcpu *v)
     }
 
     debug_state = v->domain->debugger_attached
-        || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3]
-        || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP];
+        || (v->domain->arch.hvm_domain.params
+            && (v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3]
+                || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP]));
 
     if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
     {
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index f02e47a..0ac96ab 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -788,7 +788,8 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
 
         if ( seg == x86_seg_tr )
         {
-            if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VM86_TSS] )
+            if ( v->domain->arch.hvm_domain.params
+                 && v->domain->arch.hvm_domain.params[HVM_PARAM_VM86_TSS] )
             {
                 sel = 0;
                 attr = vm86_tr_attr;
@@ -1078,7 +1079,8 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
                 v->arch.hvm_vmx.exec_control |= cr3_ctls;
 
             /* Trap CR3 updates if CR3 memory events are enabled. */
-            if ( v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3] )
+            if ( v->domain->arch.hvm_domain.params
+                 && v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3] )
                 v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
 
             vmx_update_cpu_exec_control(v);
@@ -1145,8 +1147,12 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
         if ( paging_mode_hap(v->domain) )
         {
             if ( !hvm_paging_enabled(v) )
+            {
+                /* We should never get here for pvh domains */
+                ASSERT(v->domain->arch.hvm_domain.params);
                 v->arch.hvm_vcpu.hw_cr[3] =
                     v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT];
+            }
             vmx_load_pdptrs(v);
         }
 
-- 
1.7.9.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
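
[Editorial note, not part of the posted patch] The recurring hvm_params change above is a single idiom: check that the params table exists before indexing it, since PVH containers no longer allocate it. Below is a minimal, self-contained C sketch of that guard. All names here (struct hvm_domain_stub, read_param, the main() driver, the example value) are illustrative stand-ins and not Xen's real definitions; only the NULL-check pattern mirrors the hunks above.

    /* Sketch of the "guard hvm_domain.params before reading" pattern.
     * Types and names are simplified stand-ins, not Xen code. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HVM_PARAM_IDENT_PT 12          /* index used purely as an example */

    struct hvm_domain_stub {
        uint64_t *params;                  /* NULL for a PVH-style container here */
    };

    /* Read a param only if the table was allocated; otherwise return 0. */
    static uint64_t read_param(const struct hvm_domain_stub *d, unsigned int idx)
    {
        if ( d->params == NULL )           /* PVH: no hvm_params allocated */
            return 0;
        return d->params[idx];
    }

    int main(void)
    {
        uint64_t table[32] = { 0 };
        table[HVM_PARAM_IDENT_PT] = 0xfeedf000;  /* arbitrary demo value */

        struct hvm_domain_stub hvm = { .params = table };
        struct hvm_domain_stub pvh = { .params = NULL };

        printf("hvm ident_pt = %#llx\n",
               (unsigned long long)read_param(&hvm, HVM_PARAM_IDENT_PT));
        printf("pvh ident_pt = %#llx\n",
               (unsigned long long)read_param(&pvh, HVM_PARAM_IDENT_PT));
        return 0;
    }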