[Xen-devel] [PATCH RFC v13 05/20] pvh: Disable unneeded features of HVM containers

Things kept:
 * cacheattr_region lists
 * irq-related structures
 * paging
 * tm_list
 * hvm params

Things disabled for now:
 * compat xlation

Things disabled:
 * Emulated timers and clock sources
 * IO/MMIO emulation
 * msix tables
 * hvm_funcs
 * nested HVM
 * Fast-path for emulated lapic accesses

Getting rid of the hvm_params struct required a couple other places to
check for its existence before attempting to read the params.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
v13:
 - Removed unnecessary comment
 - Allocate params for hvm domains; remove null checks necessary in last patch
 - Add ASSERT(!is_pvh) to handle_pio
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c      | 43 ++++++++++++++++++++++++++++++++++++++-----
 xen/arch/x86/hvm/io.c       |  7 +++++++
 xen/arch/x86/hvm/irq.c      |  3 +++
 xen/arch/x86/hvm/vmx/intr.c |  3 ++-
 4 files changed, 50 insertions(+), 6 deletions(-)
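
The hunks below all follow one pattern: paths shared with HVM get gated on
the guest-type predicates (is_pvh_domain(), is_pvh_vcpu(), is_hvm_vcpu())
introduced earlier in this series. As a rough sketch of the shape of those
predicates -- simplified, hypothetical definitions for illustration, not
the actual Xen ones:

    /* Illustrative sketch only, not the real Xen definitions. */
    enum guest_type { guest_type_pv, guest_type_pvh, guest_type_hvm };

    struct domain { enum guest_type guest_type; /* ... */ };
    struct vcpu   { struct domain *domain;      /* ... */ };

    #define is_pvh_domain(d) ((d)->guest_type == guest_type_pvh)
    #define is_hvm_domain(d) ((d)->guest_type == guest_type_hvm)
    #define is_pvh_vcpu(v)   (is_pvh_domain((v)->domain))
    #define is_hvm_vcpu(v)   (is_hvm_domain((v)->domain))
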
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index aaf956a..370bd4d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -301,6 +301,10 @@ u64 hvm_get_guest_tsc_adjust(struct vcpu *v)
 
 void hvm_migrate_timers(struct vcpu *v)
 {
+    /* PVH doesn't use rtc and emulated timers, it uses pvclock mechanism. */
+    if ( is_pvh_vcpu(v) )
+        return;
+
     rtc_migrate_timers(v);
     pt_migrate(v);
 }
@@ -342,10 +346,13 @@ void hvm_do_resume(struct vcpu *v)
 {
     ioreq_t *p;
 
-    pt_restore_timer(v);
-
     check_wakeup_from_wait();
 
+    if ( is_pvh_vcpu(v) )
+        goto check_inject_trap;
+
+    pt_restore_timer(v);
+
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
     p = get_ioreq(v);
     while ( p->state != STATE_IOREQ_NONE )
@@ -368,6 +375,7 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+ check_inject_trap:
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
     {
@@ -528,10 +536,16 @@ int hvm_domain_initialise(struct domain *d)
     if ( rc != 0 )
         goto fail0;
 
+    rc = -ENOMEM;
     d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
+    if ( !d->arch.hvm_domain.params )
+        goto fail1;
+
+    if ( is_pvh_domain(d) )
+        return 0;
+
     d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
-    rc = -ENOMEM;
-    if ( !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
+    if ( !d->arch.hvm_domain.io_handler )
         goto fail1;
     d->arch.hvm_domain.io_handler->num_slot = 0;
 
@@ -578,6 +592,9 @@ int hvm_domain_initialise(struct domain *d)
 
 void hvm_domain_relinquish_resources(struct domain *d)
 {
+    if ( is_pvh_domain(d) )
+        return;
+
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         hvm_funcs.nhvm_domain_relinquish_resources(d);
 
@@ -602,6 +619,10 @@ void hvm_domain_relinquish_resources(struct domain *d)
 void hvm_domain_destroy(struct domain *d)
 {
     hvm_destroy_cacheattr_region_list(d);
+
+    if ( is_pvh_domain(d) )
+        return;
+
     hvm_funcs.domain_destroy(d);
     rtc_deinit(d);
     stdvga_deinit(d);
@@ -1114,6 +1135,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
     v->arch.hvm_vcpu.inject_trap.vector = -1;
 
+    if ( is_pvh_vcpu(v) )
+    {
+        v->arch.hvm_vcpu.hcall_64bit = 1;    /* PVH 32bitfixme. */
+        /* This for hvm_long_mode_enabled(v). */
+        v->arch.hvm_vcpu.guest_efer = EFER_SCE | EFER_LMA | EFER_LME;
+        return 0;
+    }
+
     rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
     if ( rc != 0 )
         goto fail3;
@@ -1188,7 +1217,10 @@ void hvm_vcpu_destroy(struct vcpu *v)
 
     tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
     hvm_vcpu_cacheattr_destroy(v);
-    vlapic_destroy(v);
+
+    if ( is_hvm_vcpu(v) )
+        vlapic_destroy(v);
+
     hvm_funcs.vcpu_destroy(v);
 
     /* Event channel is already freed by evtchn_destroy(). */
@@ -1389,6 +1421,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
     /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
      * a fast path for LAPIC accesses, skipping the p2m lookup. */
     if ( !nestedhvm_vcpu_in_guestmode(v)
+         && is_hvm_vcpu(v)
          && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
     {
         if ( !handle_mmio() )
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 4ae2c0c..6edc5c9 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -175,6 +175,10 @@ int handle_mmio(void)
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     int rc;
 
+    /* No MMIO for PVH vcpus */
+    if ( is_pvh_vcpu(curr) )
+        return 0;
+
     hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
 
     rc = hvm_emulate_one(&ctxt);
@@ -228,6 +232,9 @@ int handle_pio(uint16_t port, int size, int dir)
     unsigned long data, reps = 1;
     int rc;
 
+    /* PIO for PVH is handled by the PV handlers */
+    ASSERT(!is_pvh_vcpu(curr));
+
     if ( dir == IOREQ_WRITE )
         data = guest_cpu_user_regs()->eax;
 
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 6a6fb68..677fbcd 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -405,6 +405,9 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
          && vcpu_info(v, evtchn_upcall_pending) )
         return hvm_intack_vector(plat->irq.callback_via.vector);
 
+    if ( is_pvh_vcpu(v) )
+        return hvm_intack_none;
+
     if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
         return hvm_intack_pic(0);
 
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index e6d5b46..482413e 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -225,7 +225,8 @@ void vmx_intr_assist(void)
     }
 
     /* Crank the handle on interrupt state. */
-    pt_vector = pt_update_irq(v);
+    if ( is_hvm_vcpu(v) )
+        pt_vector = pt_update_irq(v);
 
     do {
         intack = hvm_vcpu_has_pending_irq(v);
-- 
1.7.9.5
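
To see the hvm_do_resume() restructuring in isolation, here is a minimal
standalone model of the control flow (hypothetical types and names for
illustration, not Xen code): a PVH vcpu branches straight to the shared
trap-injection tail, skipping the emulated-timer restore and ioreq wait
that only a full HVM guest needs.

    #include <stdbool.h>
    #include <stdio.h>

    struct vcpu { bool is_pvh; int pending_trap_vector; };

    static void do_resume(struct vcpu *v)
    {
        if ( v->is_pvh )
            goto check_inject_trap;   /* no emulated timers or ioreqs */

        puts("restore emulated timers, wait for pending ioreq");

     check_inject_trap:
        if ( v->pending_trap_vector != -1 )
            printf("inject pending trap %d\n", v->pending_trap_vector);
    }

    int main(void)
    {
        struct vcpu pvh = { .is_pvh = true,  .pending_trap_vector = 14 };
        struct vcpu hvm = { .is_pvh = false, .pending_trap_vector = -1 };
        do_resume(&pvh);  /* prints only the trap-injection step */
        do_resume(&hvm);  /* prints only the timer/ioreq work */
        return 0;
    }

The goto keeps the trap-injection tail shared between both guest types
instead of duplicating it in a PVH-only early-return path.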