|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 11/22] xen/x86: allow disabling emulated devices for HVM guests
Introduce a new DOMCTL flag that can be used to disable device emulation
inside of Xen for HVM guests. The following emulated devices are disabled
when XEN_DOMCTL_CDF_noemu is used: hpet, pmtimer, rtc, ioapic, lapic,
pic and pmu. All the MMIO handlers are also disabled.
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@xxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Eddie Dong <eddie.dong@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
---
xen/arch/x86/domain.c | 2 +-
xen/arch/x86/hvm/hpet.c | 8 +++++++-
xen/arch/x86/hvm/hvm.c | 19 +++++++++++++------
xen/arch/x86/hvm/intercept.c | 31 ++++++++++++++++++++++++-------
xen/arch/x86/hvm/pmtimer.c | 7 +++++++
xen/arch/x86/hvm/rtc.c | 9 +++++++++
xen/arch/x86/hvm/stdvga.c | 6 ++++++
xen/arch/x86/hvm/svm/svm.c | 17 +++++++++--------
xen/arch/x86/hvm/vioapic.c | 8 +++++++-
xen/arch/x86/hvm/vlapic.c | 15 ++++++++++++---
xen/arch/x86/hvm/vmsi.c | 2 +-
xen/arch/x86/hvm/vmx/vmcs.c | 14 ++++++++++++++
xen/arch/x86/hvm/vmx/vmx.c | 9 ++++++++-
xen/arch/x86/hvm/vpic.c | 3 +++
xen/arch/x86/hvm/vpmu.c | 2 +-
xen/arch/x86/hvm/vpt.c | 3 +++
xen/common/domctl.c | 5 ++++-
xen/drivers/passthrough/amd/iommu_guest.c | 2 +-
xen/include/asm-x86/hvm/domain.h | 4 ++++
xen/include/asm-x86/hvm/hvm.h | 2 +-
xen/include/asm-x86/hvm/io.h | 12 +++++++-----
xen/include/public/domctl.h | 3 +++
xen/include/xen/sched.h | 9 +++++++++
23 files changed, 154 insertions(+), 38 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a112953..0916c39 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -626,7 +626,7 @@ int arch_domain_create(struct domain *d, unsigned int
domcr_flags,
if ( has_hvm_container_domain(d) )
{
- if ( (rc = hvm_domain_initialise(d)) != 0 )
+ if ( (rc = hvm_domain_initialise(d, domcr_flags)) != 0 )
{
iommu_domain_destroy(d);
goto fail;
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 9585ca8..b4c121d 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -504,7 +504,7 @@ static int hpet_range(struct vcpu *v, unsigned long addr)
(addr < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE)) );
}
-const struct hvm_mmio_ops hpet_mmio_ops = {
+struct hvm_mmio_ops hpet_mmio_ops = {
.check = hpet_range,
.read = hpet_read,
.write = hpet_write
@@ -634,6 +634,9 @@ void hpet_init(struct domain *d)
HPETState *h = domain_vhpet(d);
int i;
+ if ( d->arch.hvm_domain.no_emu )
+ return;
+
memset(h, 0, sizeof(HPETState));
rwlock_init(&h->lock);
@@ -666,6 +669,9 @@ void hpet_deinit(struct domain *d)
int i;
HPETState *h = domain_vhpet(d);
+ if ( d->arch.hvm_domain.no_emu )
+ return;
+
write_lock(&h->lock);
if ( hpet_enabled(h) )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 535d622..66f95b2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1423,7 +1423,7 @@ static int hvm_set_dm_domain(struct domain *d, domid_t
domid)
return rc;
}
-int hvm_domain_initialise(struct domain *d)
+int hvm_domain_initialise(struct domain *d, unsigned int domcr_flags)
{
int rc;
@@ -1491,10 +1491,12 @@ int hvm_domain_initialise(struct domain *d)
return 0;
}
- hvm_init_guest_time(d);
+ if ( domcr_flags & DOMCRF_noemu )
+ d->arch.hvm_domain.no_emu = 1;
+
+ setup_mmio_handlers(d);
- d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
- d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON] = SHUTDOWN_reboot;
+ hvm_init_guest_time(d);
vpic_init(d);
@@ -1506,8 +1508,13 @@ int hvm_domain_initialise(struct domain *d)
rtc_init(d);
- register_portio_handler(d, 0xe9, 1, hvm_print_line);
- register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
+ if ( !d->arch.hvm_domain.no_emu )
+ {
+ d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
+ d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON] =
SHUTDOWN_reboot;
+ register_portio_handler(d, 0xe9, 1, hvm_print_line);
+ register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
+ }
rc = hvm_funcs.domain_initialise(d);
if ( rc != 0 )
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index cc44733..714e29d 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -32,7 +32,7 @@
#include <xen/event.h>
#include <xen/iommu.h>
-static const struct hvm_mmio_ops *const
+static struct hvm_mmio_ops *const
hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
{
&hpet_mmio_ops,
@@ -166,10 +166,12 @@ static int hvm_mmio_access(struct vcpu *v,
bool_t hvm_mmio_internal(paddr_t gpa)
{
struct vcpu *curr = current;
+ struct hvm_mmio_ops *const *mmio_handlers =
+ curr->domain->arch.hvm_domain.mmio_handlers;
unsigned int i;
- for ( i = 0; i < HVM_MMIO_HANDLER_NR; ++i )
- if ( hvm_mmio_handlers[i]->check(curr, gpa) )
+ for ( i = 0; i < curr->domain->arch.hvm_domain.nr_mmio_handlers; ++i )
+ if ( mmio_handlers[i]->check(curr, gpa) )
return 1;
return 0;
@@ -178,11 +180,13 @@ bool_t hvm_mmio_internal(paddr_t gpa)
int hvm_mmio_intercept(ioreq_t *p)
{
struct vcpu *v = current;
+ struct hvm_mmio_ops *const *mmio_handlers =
+ v->domain->arch.hvm_domain.mmio_handlers;
int i;
- for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ )
+ for ( i = 0; i < v->domain->arch.hvm_domain.nr_mmio_handlers; i++ )
{
- hvm_mmio_check_t check = hvm_mmio_handlers[i]->check;
+ hvm_mmio_check_t check = mmio_handlers[i]->check;
if ( check(v, p->addr) )
{
@@ -194,8 +198,8 @@ int hvm_mmio_intercept(ioreq_t *p)
return hvm_mmio_access(
v, p,
- hvm_mmio_handlers[i]->read,
- hvm_mmio_handlers[i]->write);
+ mmio_handlers[i]->read,
+ mmio_handlers[i]->write);
}
}
@@ -398,6 +402,19 @@ void relocate_io_handler(
handler->hdl_list[i].addr = new_addr;
}
+void setup_mmio_handlers(struct domain *d)
+{
+ if ( d->arch.hvm_domain.no_emu )
+ {
+ d->arch.hvm_domain.nr_mmio_handlers = 0;
+ }
+ else
+ {
+ d->arch.hvm_domain.mmio_handlers = hvm_mmio_handlers;
+ d->arch.hvm_domain.nr_mmio_handlers = HVM_MMIO_HANDLER_NR;
+ }
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 6ad2797..01ae274 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -329,6 +329,9 @@ void pmtimer_init(struct vcpu *v)
{
PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
+ if ( v->domain->arch.hvm_domain.no_emu )
+ return;
+
spin_lock_init(&s->lock);
s->scale = ((uint64_t)FREQUENCE_PMTIMER << 32) / SYSTEM_TIME_HZ;
@@ -349,6 +352,10 @@ void pmtimer_init(struct vcpu *v)
void pmtimer_deinit(struct domain *d)
{
PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
+
+ if ( d->arch.hvm_domain.no_emu )
+ return;
+
kill_timer(&s->timer);
}
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index 3448971..b5dfb2c 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -726,6 +726,9 @@ void rtc_migrate_timers(struct vcpu *v)
{
RTCState *s = vcpu_vrtc(v);
+ if ( v->domain->arch.hvm_domain.no_emu )
+ return;
+
if ( v->vcpu_id == 0 )
{
migrate_timer(&s->update_timer, v->processor);;
@@ -790,6 +793,9 @@ void rtc_init(struct domain *d)
{
RTCState *s = domain_vrtc(d);
+ if ( d->arch.hvm_domain.no_emu )
+ return;
+
spin_lock_init(&s->lock);
init_timer(&s->update_timer, rtc_update_timer, s, smp_processor_id());
@@ -820,6 +826,9 @@ void rtc_deinit(struct domain *d)
{
RTCState *s = domain_vrtc(d);
+ if ( d->arch.hvm_domain.no_emu )
+ return;
+
spin_barrier(&s->lock);
TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER);
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index 13d1029..61718c7 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -599,6 +599,9 @@ void stdvga_init(struct domain *d)
void *p;
int i;
+ if ( d->arch.hvm_domain.no_emu )
+ return;
+
memset(s, 0, sizeof(*s));
spin_lock_init(&s->lock);
@@ -630,6 +633,9 @@ void stdvga_deinit(struct domain *d)
struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
int i;
+ if ( d->arch.hvm_domain.no_emu )
+ return;
+
for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
{
if ( s->vram_page[i] == NULL )
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a02f983..0b1175d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1035,6 +1035,7 @@ static void noreturn svm_do_resume(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
bool_t debug_state = v->domain->debugger_attached;
+ struct vlapic *vlapic = vcpu_vlapic(v);
bool_t vcpu_guestmode = 0;
if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
@@ -1059,14 +1060,13 @@ static void noreturn svm_do_resume(struct vcpu *v)
hvm_asid_flush_vcpu(v);
}
- if ( !vcpu_guestmode )
+ if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic))
{
vintr_t intr;
/* Reflect the vlapic's TPR in the hardware vtpr */
intr = vmcb_get_vintr(vmcb);
- intr.fields.tpr =
- (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;
+ intr.fields.tpr = (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
vmcb_set_vintr(vmcb, intr);
}
@@ -2295,6 +2295,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
int inst_len, rc;
vintr_t intr;
bool_t vcpu_guestmode = 0;
+ struct vlapic *vlapic = vcpu_vlapic(v);
hvm_invalidate_regs_fields(regs);
@@ -2312,11 +2313,11 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
* NB. We need to preserve the low bits of the TPR to make checked builds
* of Windows work, even though they don't actually do anything.
*/
- if ( !vcpu_guestmode ) {
+ if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) ) {
intr = vmcb_get_vintr(vmcb);
- vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI,
+ vlapic_set_reg(vlapic, APIC_TASKPRI,
((intr.fields.tpr & 0x0F) << 4) |
- (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0x0F));
+ (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0x0F));
}
exit_reason = vmcb->exitcode;
@@ -2698,14 +2699,14 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
}
out:
- if ( vcpu_guestmode )
+ if ( vcpu_guestmode || vlapic_hw_disabled(vlapic) )
/* Don't clobber TPR of the nested guest. */
return;
/* The exit may have updated the TPR: reflect this in the hardware vtpr */
intr = vmcb_get_vintr(vmcb);
intr.fields.tpr =
- (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;
+ (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
vmcb_set_vintr(vmcb, intr);
}
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index cbbef9f..efa1930 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -250,7 +250,7 @@ static int vioapic_range(struct vcpu *v, unsigned long addr)
(addr < vioapic->base_address + VIOAPIC_MEM_LENGTH)));
}
-const struct hvm_mmio_ops vioapic_mmio_ops = {
+struct hvm_mmio_ops vioapic_mmio_ops = {
.check = vioapic_range,
.read = vioapic_read,
.write = vioapic_write
@@ -449,6 +449,9 @@ void vioapic_reset(struct domain *d)
int vioapic_init(struct domain *d)
{
+ if ( d->arch.hvm_domain.no_emu )
+ return 0;
+
if ( (d->arch.hvm_domain.vioapic == NULL) &&
((d->arch.hvm_domain.vioapic = xmalloc(struct hvm_vioapic)) == NULL) )
return -ENOMEM;
@@ -461,6 +464,9 @@ int vioapic_init(struct domain *d)
void vioapic_deinit(struct domain *d)
{
+ if ( d->arch.hvm_domain.no_emu )
+ return;
+
xfree(d->arch.hvm_domain.vioapic);
d->arch.hvm_domain.vioapic = NULL;
}
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 32e649e..606cafe 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -977,7 +977,7 @@ static int vlapic_range(struct vcpu *v, unsigned long addr)
(offset < PAGE_SIZE);
}
-const struct hvm_mmio_ops vlapic_mmio_ops = {
+struct hvm_mmio_ops vlapic_mmio_ops = {
.check = vlapic_range,
.read = vlapic_read,
.write = vlapic_write
@@ -994,6 +994,9 @@ static void set_x2apic_id(struct vlapic *vlapic)
bool_t vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
{
+ if ( vlapic_domain(vlapic)->arch.hvm_domain.no_emu )
+ return 0;
+
if ( (vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE )
{
if ( unlikely(value & MSR_IA32_APICBASE_EXTD) )
@@ -1044,7 +1047,7 @@ void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t
value)
struct vcpu *v = vlapic_vcpu(vlapic);
/* may need to exclude some other conditions like vlapic->hw.disabled */
- if ( !vlapic_lvtt_tdt(vlapic) )
+ if ( !vlapic_lvtt_tdt(vlapic) || vlapic_hw_disabled(vlapic) )
{
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "ignore tsc deadline msr write");
return;
@@ -1119,6 +1122,9 @@ static int __vlapic_accept_pic_intr(struct vcpu *v)
int vlapic_accept_pic_intr(struct vcpu *v)
{
+ if ( v->domain->arch.hvm_domain.no_emu )
+ return 0;
+
TRACE_2D(TRC_HVM_EMUL_LAPIC_PIC_INTR,
(v == v->domain->arch.hvm_domain.i8259_target),
v ? __vlapic_accept_pic_intr(v) : -1);
@@ -1400,7 +1406,7 @@ int vlapic_init(struct vcpu *v)
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);
- if ( is_pvh_vcpu(v) )
+ if ( is_pvh_vcpu(v) || v->domain->arch.hvm_domain.no_emu )
{
vlapic->hw.disabled = VLAPIC_HW_DISABLED;
return 0;
@@ -1450,6 +1456,9 @@ void vlapic_destroy(struct vcpu *v)
{
struct vlapic *vlapic = vcpu_vlapic(v);
+ if ( v->domain->arch.hvm_domain.no_emu )
+ return;
+
tasklet_kill(&vlapic->init_sipi.tasklet);
TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
destroy_periodic_time(&vlapic->pt);
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index f89233d..2962042 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -344,7 +344,7 @@ static int msixtbl_range(struct vcpu *v, unsigned long addr)
return !!desc;
}
-const struct hvm_mmio_ops msixtbl_mmio_ops = {
+struct hvm_mmio_ops msixtbl_mmio_ops = {
.check = msixtbl_range,
.read = msixtbl_read,
.write = msixtbl_write
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 4c5ceb5..a27f117 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -992,6 +992,20 @@ static int construct_vmcs(struct vcpu *v)
ASSERT(!(v->arch.hvm_vmx.exec_control & CPU_BASED_RDTSC_EXITING));
}
+ if ( d->arch.hvm_domain.no_emu )
+ {
+ /* Disable virtual apics, TPR */
+ v->arch.hvm_vmx.secondary_exec_control &=
+ ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES
+ | SECONDARY_EXEC_APIC_REGISTER_VIRT
+ | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+ v->arch.hvm_vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
+
+ /* In turn, disable posted interrupts. */
+ __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
+ vmx_pin_based_exec_control & ~PIN_BASED_POSTED_INTERRUPT);
+ }
+
vmx_update_cpu_exec_control(v);
__vmwrite(VM_EXIT_CONTROLS, vmexit_ctl);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index fc29b89..0d04623 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -88,6 +88,9 @@ static int vmx_domain_initialise(struct domain *d)
{
int rc;
+ if ( d->arch.hvm_domain.no_emu )
+ return 0;
+
if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
return rc;
@@ -96,6 +99,9 @@ static int vmx_domain_initialise(struct domain *d)
static void vmx_domain_destroy(struct domain *d)
{
+ if ( d->arch.hvm_domain.no_emu )
+ return;
+
vmx_free_vlapic_mapping(d);
}
@@ -2240,7 +2246,8 @@ static void vmx_install_vlapic_mapping(struct vcpu *v)
{
paddr_t virt_page_ma, apic_page_ma;
- if ( !cpu_has_vmx_virtualize_apic_accesses )
+ if ( !cpu_has_vmx_virtualize_apic_accesses ||
+ v->domain->arch.hvm_domain.vmx.apic_access_mfn == 0 )
return;
virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index 8eea061..169e870 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -425,6 +425,9 @@ void vpic_reset(struct domain *d)
void vpic_init(struct domain *d)
{
+ if ( d->arch.hvm_domain.no_emu )
+ return;
+
vpic_reset(d);
register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index c3273ee..d625365 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -233,7 +233,7 @@ void vpmu_initialise(struct vcpu *v)
uint8_t vendor = current_cpu_data.x86_vendor;
int ret;
- if ( is_pvh_vcpu(v) )
+ if ( is_pvh_vcpu(v) || v->domain->arch.hvm_domain.no_emu )
return;
ASSERT(!vpmu->flags && !vpmu->context);
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index 7c6549c..c3a8534 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -375,6 +375,9 @@ void pt_migrate(struct vcpu *v)
struct list_head *head = &v->arch.hvm_vcpu.tm_list;
struct periodic_time *pt;
+ if ( v->domain->arch.hvm_domain.no_emu )
+ return;
+
spin_lock(&v->arch.hvm_vcpu.tm_lock);
list_for_each_entry ( pt, head, list )
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 2a2d203..b327596 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -551,7 +551,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t)
u_domctl)
| XEN_DOMCTL_CDF_pvh_guest
| XEN_DOMCTL_CDF_hap
| XEN_DOMCTL_CDF_s3_integrity
- | XEN_DOMCTL_CDF_oos_off)) )
+ | XEN_DOMCTL_CDF_oos_off
+ | XEN_DOMCTL_CDF_noemu)) )
break;
dom = op->domain;
@@ -593,6 +594,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t)
u_domctl)
domcr_flags |= DOMCRF_s3_integrity;
if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_oos_off )
domcr_flags |= DOMCRF_oos_off;
+ if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_noemu )
+ domcr_flags |= DOMCRF_noemu;
d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref,
&op->u.createdomain.config);
diff --git a/xen/drivers/passthrough/amd/iommu_guest.c
b/xen/drivers/passthrough/amd/iommu_guest.c
index 7b0c102..7266b6c 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -919,7 +919,7 @@ static int guest_iommu_mmio_range(struct vcpu *v, unsigned
long addr)
addr < iommu->mmio_base + IOMMU_MMIO_SIZE;
}
-const struct hvm_mmio_ops iommu_mmio_ops = {
+struct hvm_mmio_ops iommu_mmio_ops = {
.check = guest_iommu_mmio_range,
.read = guest_iommu_mmio_read,
.write = guest_iommu_mmio_write
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index ad68fcf..03d223d 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -135,6 +135,7 @@ struct hvm_domain {
bool_t mem_sharing_enabled;
bool_t qemu_mapcache_invalidate;
bool_t is_s3_suspended;
+ bool_t no_emu;
/*
* TSC value that VCPUs use to calculate their tsc_offset value.
@@ -144,6 +145,9 @@ struct hvm_domain {
unsigned long *io_bitmap;
+ struct hvm_mmio_ops *const *mmio_handlers;
+ int nr_mmio_handlers;
+
union {
struct vmx_domain vmx;
struct svm_domain svm;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 57f9605..d62c2b8 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -217,7 +217,7 @@ extern s8 hvm_port80_allowed;
extern const struct hvm_function_table *start_svm(void);
extern const struct hvm_function_table *start_vmx(void);
-int hvm_domain_initialise(struct domain *d);
+int hvm_domain_initialise(struct domain *d, unsigned int domcr_flags);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index f2aaec5..bd948e7 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -65,11 +65,11 @@ struct hvm_mmio_ops {
hvm_mmio_write_t write;
};
-extern const struct hvm_mmio_ops hpet_mmio_ops;
-extern const struct hvm_mmio_ops vlapic_mmio_ops;
-extern const struct hvm_mmio_ops vioapic_mmio_ops;
-extern const struct hvm_mmio_ops msixtbl_mmio_ops;
-extern const struct hvm_mmio_ops iommu_mmio_ops;
+extern struct hvm_mmio_ops hpet_mmio_ops;
+extern struct hvm_mmio_ops vlapic_mmio_ops;
+extern struct hvm_mmio_ops vioapic_mmio_ops;
+extern struct hvm_mmio_ops msixtbl_mmio_ops;
+extern struct hvm_mmio_ops iommu_mmio_ops;
#define HVM_MMIO_HANDLER_NR 5
@@ -81,6 +81,8 @@ void relocate_io_handler(
struct domain *d, unsigned long old_addr, unsigned long new_addr,
unsigned long size, int type);
+void setup_mmio_handlers(struct domain *d);
+
static inline int hvm_portio_intercept(ioreq_t *p)
{
return hvm_io_intercept(p, HVM_PORTIO);
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index bc45ea5..4e9d7e7 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -63,6 +63,9 @@ struct xen_domctl_createdomain {
/* Is this a PVH guest (as opposed to an HVM or PV guest)? */
#define _XEN_DOMCTL_CDF_pvh_guest 4
#define XEN_DOMCTL_CDF_pvh_guest (1U<<_XEN_DOMCTL_CDF_pvh_guest)
+ /* Disable emulated devices */
+#define _XEN_DOMCTL_CDF_noemu 5
+#define XEN_DOMCTL_CDF_noemu (1U<<_XEN_DOMCTL_CDF_noemu)
uint32_t flags;
struct xen_arch_domainconfig config;
};
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index b29d9e7..d88c6aa 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -557,6 +557,15 @@ struct domain *domain_create(domid_t domid, unsigned int
domcr_flags,
/* DOMCRF_pvh: Create PV domain in HVM container. */
#define _DOMCRF_pvh 5
#define DOMCRF_pvh (1U<<_DOMCRF_pvh)
+/*
+ * DOMCRF_noemu: Create a HVM domain without emulated devices.
+ *
+ * This currently disables the following emulated devices inside of Xen:
+ * hpet, pmtimer, rtc, ioapic, lapic, pic and pmu.
+ * It also disables all the MMIO handlers.
+ */
+#define _DOMCRF_noemu 6
+#define DOMCRF_noemu (1U<<_DOMCRF_noemu)
/*
* rcu_lock_domain_by_id() is more efficient than get_domain_by_id().
--
1.9.5 (Apple Git-50.3)
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |