Re: [Xen-devel] [PATCH v2] xen/pvh: use a custom IO bitmap for PVH hardware domains
On 28/04/15 16:44, Roger Pau Monne wrote:
> Since a PVH hardware domain has access to the physical hardware,
> create a custom, more permissive IO bitmap. The permissions set on the
> bitmap are populated based on the contents of the ioports rangeset.
>
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> Cc: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@xxxxxxx>
> Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> Cc: Eddie Dong <eddie.dong@xxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
> ---
> Changes since v1:
>  - Dynamically allocate the PVH Dom0 IO bitmap if needed.
>  - Drop the cast from construct_vmcs when writing the IO bitmap.
>  - Create a new function that sets up the bitmap before launching
>    Dom0. This is needed because ns16550_endboot is called after
>    construct_dom0.
> ---
>  xen/arch/x86/domain_build.c      | 13 +++++++++++++
>  xen/arch/x86/hvm/hvm.c           | 21 ++++++++++++++++++++-
>  xen/arch/x86/hvm/svm/vmcb.c      |  3 ++-
>  xen/arch/x86/hvm/vmx/vmcs.c      |  5 +++--
>  xen/arch/x86/setup.c             |  2 ++
>  xen/include/asm-x86/hvm/domain.h |  2 ++
>  xen/include/asm-x86/setup.h      |  1 +
>  7 files changed, 43 insertions(+), 4 deletions(-)
>
> diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
> index 378e650..5283a21 100644
> --- a/xen/arch/x86/domain_build.c
> +++ b/xen/arch/x86/domain_build.c
> @@ -1635,6 +1635,19 @@ out:
>      return rc;
>  }
>
> +void __init setup_io_bitmap(struct domain *d)
> +{
> +    int i;

unsigned

> +
> +    if ( is_pvh_domain(d) )
> +    {
> +        for ( i = 0; i < 0x10000; i++ )
> +            /* NB: 0xcf8 has special treatment so we need to trap it. */

Why? (and irrespective of my question, cf8 expects a 4 byte access, and
surely cfc would need similar treatment?)

> +            if ( ioports_access_permitted(d, i, i) && i != 0xcf8 )
> +                __clear_bit(i, d->arch.hvm_domain.io_bitmap);

I still think that there must be a more efficient way of doing this
loop.
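For illustration only, a rough sketch of what I have in mind: walk the
permitted ranges rather than all 65536 ports individually. This assumes
the rangeset behind ioports_access_permitted() is d->arch.ioport_caps
and uses rangeset_report_ranges() as declared in xen/include/xen/rangeset.h;
io_bitmap_cb is a made-up helper name, and the whole thing is untested:

static int __init io_bitmap_cb(unsigned long s, unsigned long e,
                               void *ctx)
{
    struct domain *d = ctx;
    unsigned int i;

    /* Clear (i.e. permit direct access to) every port in this range. */
    for ( i = s; i <= e; i++ )
        __clear_bit(i, d->arch.hvm_domain.io_bitmap);

    return 0;
}

void __init setup_io_bitmap(struct domain *d)
{
    if ( is_pvh_domain(d) )
    {
        /* One pass over the permitted ranges, not all 0x10000 ports. */
        rangeset_report_ranges(d->arch.ioport_caps, 0, 0xffff,
                               io_bitmap_cb, d);
        /* Re-set 0xcf8 so that it still traps, per the comment above. */
        __set_bit(0xcf8, d->arch.hvm_domain.io_bitmap);
    }
}

That makes the cost proportional to the (typically small) number of
permitted ranges, and keeps the 0xcf8 special case in one obvious
place.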
> +    }
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index bfde380..450edc3 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -78,9 +78,10 @@ integer_param("hvm_debug", opt_hvm_debug_level);
>
>  struct hvm_function_table hvm_funcs __read_mostly;
>
> +#define HVM_IOBITMAP_SIZE (3*PAGE_SIZE/BYTES_PER_LONG)
>  /* I/O permission bitmap is globally shared by all HVM guests. */
>  unsigned long __attribute__ ((__section__ (".bss.page_aligned")))
> -    hvm_io_bitmap[3*PAGE_SIZE/BYTES_PER_LONG];
> +    hvm_io_bitmap[HVM_IOBITMAP_SIZE];
>
>  /* Xen command-line option to enable HAP */
>  static bool_t __initdata opt_hap_enabled = 1;
> @@ -1484,6 +1485,22 @@ int hvm_domain_initialise(struct domain *d)
>          goto fail1;
>      d->arch.hvm_domain.io_handler->num_slot = 0;
>
> +    /* Set the default IO Bitmap */
> +    if ( is_hardware_domain(d) )
> +    {
> +        d->arch.hvm_domain.io_bitmap = xzalloc_array(unsigned long,
> +                                                     HVM_IOBITMAP_SIZE);

This is an unnecessarily large alignment. The bitmap only needs page
alignment.
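i.e. the page alignment could come straight from the page allocator.
A sketch only, assuming alloc_xenheap_pages() and get_order_from_bytes()
with their usual declarations (xen/include/xen/mm.h and
xen/include/asm-x86/page.h), with error handling matching the
surrounding code:

    /*
     * HVM_IOBITMAP_SIZE longs == 3 pages; an order-2 xenheap
     * allocation is page-aligned (at the cost of one slack page),
     * whereas xzalloc_array() size-aligns the allocation.
     */
    d->arch.hvm_domain.io_bitmap =
        alloc_xenheap_pages(get_order_from_bytes(HVM_IOBITMAP_SIZE *
                                                 BYTES_PER_LONG), 0);
    if ( d->arch.hvm_domain.io_bitmap == NULL )
    {
        rc = -ENOMEM;
        goto fail1;
    }
    /* Default to trapping everything; setup_io_bitmap() opens holes. */
    memset(d->arch.hvm_domain.io_bitmap, ~0,
           HVM_IOBITMAP_SIZE * BYTES_PER_LONG);

The failure and teardown paths would then need free_xenheap_pages()
with the same order, rather than xfree().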
> +        if ( d->arch.hvm_domain.io_bitmap == NULL )
> +        {
> +            rc = -ENOMEM;
> +            goto fail1;
> +        }
> +        memset(d->arch.hvm_domain.io_bitmap, ~0,
> +               HVM_IOBITMAP_SIZE * BYTES_PER_LONG);
> +    }
> +    else
> +        d->arch.hvm_domain.io_bitmap = hvm_io_bitmap;
> +
>      if ( is_pvh_domain(d) )
>      {
>          register_portio_handler(d, 0, 0x10003, handle_pvh_io);
> @@ -1519,6 +1536,8 @@ int hvm_domain_initialise(struct domain *d)
>      stdvga_deinit(d);
>      vioapic_deinit(d);
>  fail1:
> +    if ( is_hardware_domain(d) )
> +        xfree(d->arch.hvm_domain.io_bitmap);
>      xfree(d->arch.hvm_domain.io_handler);
>      xfree(d->arch.hvm_domain.params);
>  fail0:
> diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
> index 21292bb..227a187 100644
> --- a/xen/arch/x86/hvm/svm/vmcb.c
> +++ b/xen/arch/x86/hvm/svm/vmcb.c
> @@ -118,7 +118,8 @@ static int construct_vmcb(struct vcpu *v)
>          svm_disable_intercept_for_msr(v, MSR_AMD64_LWP_CBADDR);
>
>      vmcb->_msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
> -    vmcb->_iopm_base_pa = (u64)virt_to_maddr(hvm_io_bitmap);
> +    vmcb->_iopm_base_pa =
> +        (u64)virt_to_maddr(v->domain->arch.hvm_domain.io_bitmap);
>
>      /* Virtualise EFLAGS.IF and LAPIC TPR (CR8). */
>      vmcb->_vintr.fields.intr_masking = 1;
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index 63007a9..bd37f0e 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -986,8 +986,9 @@ static int construct_vmcs(struct vcpu *v)
>      }
>
>      /* I/O access bitmap. */
> -    __vmwrite(IO_BITMAP_A, virt_to_maddr((char *)hvm_io_bitmap + 0));
> -    __vmwrite(IO_BITMAP_B, virt_to_maddr((char *)hvm_io_bitmap + PAGE_SIZE));
> +    __vmwrite(IO_BITMAP_A, virt_to_maddr(d->arch.hvm_domain.io_bitmap));
> +    __vmwrite(IO_BITMAP_B, virt_to_maddr(d->arch.hvm_domain.io_bitmap +
> +                                         PAGE_SIZE / BYTES_PER_LONG));
>
>      if ( cpu_has_vmx_virtual_intr_delivery )
>      {
> diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
> index 2b9787a..339343f 100644
> --- a/xen/arch/x86/setup.c
> +++ b/xen/arch/x86/setup.c
> @@ -1446,6 +1446,8 @@ void __init noreturn __start_xen(unsigned long mbi_p)
>
>      dmi_end_boot();
>
> +    setup_io_bitmap(dom0);
> +
>      system_state = SYS_STATE_active;
>
>      domain_unpause_by_systemcontroller(dom0);
> diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> index 0702bf5..d002954 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -143,6 +143,8 @@ struct hvm_domain {
>       */
>      uint64_t sync_tsc;
>
> +    unsigned long *io_bitmap;
> +
>      union {
>          struct vmx_domain vmx;
>          struct svm_domain svm;
> diff --git a/xen/include/asm-x86/setup.h b/xen/include/asm-x86/setup.h
> index 08bc23a..381d9f8 100644
> --- a/xen/include/asm-x86/setup.h
> +++ b/xen/include/asm-x86/setup.h
> @@ -32,6 +32,7 @@ int construct_dom0(
>      module_t *initrd,
>      void *(*bootstrap_map)(const module_t *),
>      char *cmdline);
> +void setup_io_bitmap(struct domain *d);
>
>  unsigned long initial_images_nrpages(nodeid_t node);
>  void discard_initial_images(void);

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel