[Xen-devel] [PATCH] x86/hvm: implement save/restore for posted interrupts
Saving and restoring a PVonHVM guest on a host which has the VMX
"Posted Interrupt Processing" feature enabled fails because the
xen-platform-pci device no longer receives interrupts after the
restore. The reason is that pending IRQs are not maintained in
APIC_IRR but in a separate PIR array, and this state is lost during
the save operation.

Syncing the PIR state into IRR during save, and restoring that state
later, fixes 'xm save/restore' of a PVonHVM guest.
Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
---
Jan suggested using the TMR flag to set 'trig' for vlapic_set_irq(),
but we are not sure whether that is the correct approach, as opposed
to forcing trig=1.
Comments are welcome.
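
For reference, the save side already has a hook that folds PIR into
IRR: vmx_sync_pir_to_irr(), which lapic_save_regs() now calls before
writing out the LAPIC registers. Roughly, it looks like the sketch
below (from memory, assuming the pi_test_and_clear_on()/pi_get_pirr()
helpers from vmcs.h; see vmx.c for the authoritative version):

static void vmx_sync_pir_to_irr(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int group, i;
    DECLARE_BITMAP(pending_intr, NR_VECTORS);

    /* Nothing to do unless a posted interrupt is outstanding. */
    if ( !pi_test_and_clear_on(&v->arch.hvm_vmx.pi_desc) )
        return;

    /* Fetch and clear each PIR group... */
    for ( group = 0; group < ARRAY_SIZE(pending_intr); group++ )
        pending_intr[group] = pi_get_pirr(&v->arch.hvm_vmx.pi_desc, group);

    /* ...and mirror every pending vector into APIC_IRR. */
    for_each_set_bit(i, pending_intr, NR_VECTORS)
        vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
}

The new sync_irr_to_pir hook added below is the restore-side
counterpart: it re-injects each vector found in the saved APIC_IRR
via vlapic_set_irq(), using the saved TMR bit as the trigger mode.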
 xen/arch/x86/hvm/vlapic.c        |  4 ++++
 xen/arch/x86/hvm/vmx/vmx.c       | 14 ++++++++++++++
 xen/include/asm-x86/hvm/hvm.h    |  1 +
 xen/include/asm-x86/hvm/vlapic.h |  2 ++
 4 files changed, 21 insertions(+)
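
To exercise the path, something like the following should do (a
hypothetical example, assuming a PVonHVM guest named "domU" on a
posted-interrupt capable host):

    xm save domU /tmp/domU.chkpt
    xm restore /tmp/domU.chkpt

Afterwards the xen-platform-pci interrupt count in the guest's
/proc/interrupts should keep increasing; without this patch the
device stops receiving interrupts after the restore.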
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index cd7e872..7201544 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1179,6 +1179,8 @@ static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
     for_each_vcpu ( d, v )
     {
+        if ( hvm_funcs.sync_pir_to_irr )
+            hvm_funcs.sync_pir_to_irr(v);
         s = vcpu_vlapic(v);
         if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 )
             break;
@@ -1230,6 +1232,8 @@ static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
         return -EINVAL;
+    if ( hvm_funcs.sync_irr_to_pir )
+        hvm_funcs.sync_irr_to_pir(v);
     if ( hvm_funcs.process_isr )
         hvm_funcs.process_isr(vlapic_find_highest_isr(s), v);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2caa04a..85df77d 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1654,6 +1654,18 @@ static void vmx_sync_pir_to_irr(struct vcpu *v)
         vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
 }
 
+static void vmx_sync_irr_to_pir(struct vcpu *v)
+{
+    struct vlapic *vlapic = vcpu_vlapic(v);
+    unsigned int vector;
+
+    for ( vector = 0; vector < NR_VECTORS; vector++ ) {
+        if ( vlapic_test_vector(vector, &vlapic->regs->data[APIC_IRR]) )
+            vlapic_set_irq(vlapic, vector,
+                vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]));
+    }
+}
+
 static void vmx_handle_eoi(u8 vector)
 {
     unsigned long status;
@@ -1737,6 +1749,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .process_isr = vmx_process_isr,
     .deliver_posted_intr = vmx_deliver_posted_intr,
     .sync_pir_to_irr = vmx_sync_pir_to_irr,
+    .sync_irr_to_pir = vmx_sync_irr_to_pir,
     .handle_eoi = vmx_handle_eoi,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
     .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
@@ -1783,6 +1796,7 @@ const struct hvm_function_table * __init start_vmx(void)
     {
         vmx_function_table.deliver_posted_intr = NULL;
         vmx_function_table.sync_pir_to_irr = NULL;
+        vmx_function_table.sync_irr_to_pir = NULL;
     }
 
     if ( cpu_has_vmx_ept
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0ebd478..c762f1e 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -194,6 +194,7 @@ struct hvm_function_table {
     void (*process_isr)(int isr, struct vcpu *v);
     void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
     void (*sync_pir_to_irr)(struct vcpu *v);
+    void (*sync_irr_to_pir)(struct vcpu *v);
     void (*handle_eoi)(u8 vector);
 
     /*Walk nested p2m */
diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
index 66f0aff..2a83a1e 100644
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -58,6 +58,8 @@
 #define VEC_POS(v) ((v) % 32)
 #define REG_POS(v) (((v) / 32) * 0x10)
+#define vlapic_test_vector(vec, bitmap) \
+    test_bit(VEC_POS(vec), (uint32_t *)((bitmap) + REG_POS(vec)))
 #define vlapic_test_and_set_vector(vec, bitmap) \
     test_and_set_bit(VEC_POS(vec), (uint32_t *)((bitmap) + REG_POS(vec)))
 #define vlapic_test_and_clear_vector(vec, bitmap) \