[Xen-devel] [PATCH v3 2/2] x86/hvm: finish IOREQs correctly on completion path
Since the introduction of the linear_{read,write}() helpers in 3bdec530a5
(x86/HVM: split page straddling emulated accesses in more cases) the
completion path for IOREQs has been broken: if there is an IOREQ in
progress but hvm_copy_{to,from}_guest_linear() returns HVMTRANS_okay
(e.g. when the P2M type of the source/destination has been changed by the
IOREQ handler), execution will never re-enter hvmemul_do_io(), where
IOREQs are completed. This usually results in a domain crash when the
next IOREQ enters hvmemul_do_io() and finds the remnants of the previous
IOREQ in the state machine.

This particular issue was discovered in relation to the p2m_ioreq_server
type, where an emulator changed the memory type between p2m_ioreq_server
and p2m_ram_rw while responding to an IOREQ, which made hvm_copy_..()
behave differently on the way back.

Fix it for now by checking whether IOREQ completion is required (which
can be identified by querying the MMIO cache) before trying to finish
the memory access immediately through hvm_copy_..(); if it is, re-enter
hvmemul_do_io() instead. This change alone addresses the IOREQ completion
issue where the P2M type is modified in the middle of emulation, but it
is not enough for the more general case where the machine state
arbitrarily changes behind our back.
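
For illustration only, here is a minimal standalone sketch (not Xen code;
mmio_cache_has_entry() and copy_from_guest() are hypothetical stand-ins
for hvmemul_find_mmio_cache() and hvm_copy_from_guest_linear()) of the
ordering the fix enforces in linear_read()/linear_write(): default to the
MMIO path and only attempt the direct copy when no cache entry marks the
access as in-flight MMIO.

/* Standalone sketch, not Xen code: helpers below are simplified
 * stand-ins for the real Xen functions named above. */
#include <stdbool.h>
#include <stdio.h>

enum hvmtrans { HVMTRANS_okay, HVMTRANS_bad_gfn_to_mfn };

/* Pretend an earlier pass recorded this linear address as MMIO. */
static bool mmio_cache_has_entry(unsigned long addr)
{
    return addr == 0xfeedf000UL;
}

/* Pretend the direct copy would now succeed, e.g. because the IOREQ
 * handler changed the P2M type while the request was in flight. */
static enum hvmtrans copy_from_guest(void *buf, unsigned long addr)
{
    (void)buf;
    (void)addr;
    return HVMTRANS_okay;
}

static enum hvmtrans linear_read_sketch(unsigned long addr, void *buf)
{
    /* Default to "needs the MMIO path"; only an access with no pending
     * MMIO state may be finished by a direct copy. */
    enum hvmtrans rc = HVMTRANS_bad_gfn_to_mfn;

    if ( !mmio_cache_has_entry(addr) )
        rc = copy_from_guest(buf, addr);

    /* HVMTRANS_bad_gfn_to_mfn makes the caller fall through to the MMIO
     * machinery, so a pending IOREQ gets completed there. */
    return rc;
}

int main(void)
{
    char buf[8];

    printf("cached MMIO access  -> rc %d\n",
           (int)linear_read_sketch(0xfeedf000UL, buf));
    printf("ordinary RAM access -> rc %d\n",
           (int)linear_read_sketch(0x1000UL, buf));
    return 0;
}
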
Signed-off-by: Igor Druzhinin <igor.druzhinin@xxxxxxxxxx>
---
Changes in v3:
* made it more clear that it's still a partial fix in the commit description
* other minor suggestions
---
xen/arch/x86/hvm/emulate.c | 31 +++++++++++++++++++++++++------
1 file changed, 25 insertions(+), 6 deletions(-)
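
As a side note, here is a minimal standalone sketch (not Xen code; the
structure layout and slot count are assumptions for illustration) of the
lookup-or-allocate behaviour the first hunk below adds to
hvmemul_find_mmio_cache(): with create == false the helper becomes a pure
query, so linear_read()/linear_write() can test for a pending MMIO access
without allocating a new cache slot.

/* Standalone sketch, not Xen code: a per-vCPU cache keyed by linear
 * address and direction, with a "create" flag turning the helper into
 * a pure query. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define CACHE_SLOTS 4            /* assumed size, for illustration */

struct mmio_cache_entry {
    unsigned long gla;           /* linear address of the access */
    uint8_t dir;                 /* direction of the access */
};

struct vcpu_io {
    unsigned int count;
    struct mmio_cache_entry slot[CACHE_SLOTS];
};

static struct mmio_cache_entry *find_mmio_cache(struct vcpu_io *vio,
                                                unsigned long gla,
                                                uint8_t dir, bool create)
{
    unsigned int i;

    for ( i = 0; i < vio->count; i++ )
        if ( vio->slot[i].gla == gla && vio->slot[i].dir == dir )
            return &vio->slot[i];

    /* Query-only callers stop here instead of allocating a slot. */
    if ( !create )
        return NULL;

    if ( vio->count == CACHE_SLOTS )
        return NULL;

    i = vio->count++;
    vio->slot[i].gla = gla;
    vio->slot[i].dir = dir;
    return &vio->slot[i];
}

int main(void)
{
    struct vcpu_io vio = { 0 };

    assert(find_mmio_cache(&vio, 0xfeedf000UL, 0, false) == NULL);
    assert(find_mmio_cache(&vio, 0xfeedf000UL, 0, true) != NULL);
    assert(find_mmio_cache(&vio, 0xfeedf000UL, 0, false) != NULL);
    return 0;
}
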
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 4879ccb..92a9b82 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -952,7 +952,7 @@ static int hvmemul_phys_mmio_access(
* cache indexed by linear MMIO address.
*/
static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
- struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir)
+ struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir, bool create)
{
unsigned int i;
struct hvm_mmio_cache *cache;
@@ -966,6 +966,9 @@ static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
return cache;
}
+ if ( !create )
+ return NULL;
+
i = vio->mmio_cache_count;
if ( i == ARRAY_SIZE(vio->mmio_cache) )
return NULL;
@@ -1000,7 +1003,7 @@ static int hvmemul_linear_mmio_access(
{
struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned long offset = gla & ~PAGE_MASK;
- struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir);
+ struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir, true);
unsigned int chunk, buffer_offset = 0;
paddr_t gpa;
unsigned long one_rep = 1;
@@ -1089,8 +1092,9 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
{
pagefault_info_t pfinfo;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned int offset = addr & ~PAGE_MASK;
- int rc;
+ int rc = HVMTRANS_bad_gfn_to_mfn;
if ( offset + bytes > PAGE_SIZE )
{
@@ -1104,7 +1108,14 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
return rc;
}
- rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+ /*
+ * If there is an MMIO cache entry for that access then we must be re-issuing
+ * an access that was previously handled as MMIO. Thus it is imperative that
+ * we handle this access in the same way to guarantee completion and hence
+ * clean up any interim state.
+ */
+ if ( !hvmemul_find_mmio_cache(vio, addr, IOREQ_READ, false) )
+ rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
switch ( rc )
{
@@ -1134,8 +1145,9 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
{
pagefault_info_t pfinfo;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned int offset = addr & ~PAGE_MASK;
- int rc;
+ int rc = HVMTRANS_bad_gfn_to_mfn;
if ( offset + bytes > PAGE_SIZE )
{
@@ -1149,7 +1161,14 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
return rc;
}
- rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+ /*
+ * If there is an MMIO cache entry for that access then we must be re-issuing
+ * an access that was previously handled as MMIO. Thus it is imperative that
+ * we handle this access in the same way to guarantee completion and hence
+ * clean up any interim state.
+ */
+ if ( !hvmemul_find_mmio_cache(vio, addr, IOREQ_WRITE, false) )
+ rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
switch ( rc )
{
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel