[Xen-devel] [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases

Assuming that a range of consecutive linear addresses maps to either all
RAM or all MMIO is not correct. Nor is assuming that a page-straddling
MMIO access will be handled by the same emulating component for both
parts of the access. If a guest RAM read fails with
HVMTRANS_bad_gfn_to_mfn and the access straddles a page boundary, issue
accesses separately for both parts.
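
For illustration only (not part of the patch; the standalone program and
its sample address below are made up for this write-up): the split done
by the new linear_read() / linear_write() helpers boils down to the
following page arithmetic.

/* Carve a linear access that straddles a page boundary into two
 * per-page pieces, using the same arithmetic as the patch does. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long addr = 0x10ffcUL; /* example: 4 bytes below a page end */
    unsigned int bytes = 8;         /* example access size */
    unsigned int offset = addr & ~PAGE_MASK;

    if ( offset + bytes <= PAGE_SIZE )
        printf("single access: %#lx, %u bytes\n", addr, bytes);
    else
    {
        unsigned int part1 = PAGE_SIZE - offset;

        printf("part 1: %#lx, %u bytes\n", addr, part1);
        printf("part 2: %#lx, %u bytes\n", addr + part1, bytes - part1);
    }

    return 0;
}

Each part is then handled on its own, so one half can be satisfied from
guest RAM while the other goes through the MMIO emulation path.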

The extra call to known_glfn() from hvmemul_write() is there just to
preserve the original behavior; we should consider dropping it (also to
make sure the intended effect of 8cbd4fb0b7 ["x86/hvm: implement
hvmemul_write() using real mappings"] is achieved in all cases where it
can be achieved without further rework), or else mirror it in
hvmemul_rmw().

Note that the correctness of this depends on the MMIO caching used
elsewhere in the emulation code.
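
For reference (again not part of the patch), that cached state lives in
the per-vCPU struct hvm_vcpu_io, and the check added as known_glfn()
below essentially asks whether the whole access sits on the page whose
linear address was recorded by the MMIO cache; a sketch, with a
hypothetical helper name:

static bool gla_is_cached_mmio(const struct hvm_vcpu_io *vio,
                               unsigned long addr, unsigned int bytes)
{
    /* Sketch only, mirroring known_glfn() in the patch below. */
    return vio->mmio_gla == (addr & PAGE_MASK) &&
           (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE;
}
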
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Also handle hvmemul_{write,rmw}().
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1041,6 +1041,110 @@ static inline int hvmemul_linear_mmio_wr
                                       pfec, hvmemul_ctxt, translate);
 }
 
+static bool known_glfn(unsigned long addr, unsigned int bytes, uint32_t pfec)
+{
+    const struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+
+    if ( pfec & PFEC_write_access )
+    {
+        if ( !vio->mmio_access.write_access )
+            return false;
+    }
+    else if ( pfec & PFEC_insn_fetch )
+    {
+        if ( !vio->mmio_access.insn_fetch )
+            return false;
+    }
+    else if ( !vio->mmio_access.read_access )
+        return false;
+
+    return (vio->mmio_gla == (addr & PAGE_MASK) &&
+            (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
+}
+
+static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
+                       uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    pagefault_info_t pfinfo;
+    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+
+    switch ( rc )
+    {
+        unsigned int offset, part1;
+
+    case HVMTRANS_okay:
+        return X86EMUL_OKAY;
+
+    case HVMTRANS_bad_linear_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+
+    case HVMTRANS_bad_gfn_to_mfn:
+        if ( pfec & PFEC_insn_fetch )
+            return X86EMUL_UNHANDLEABLE;
+
+        offset = addr & ~PAGE_MASK;
+        if ( offset + bytes <= PAGE_SIZE )
+            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
+                                            hvmemul_ctxt,
+                                            known_glfn(addr, bytes, pfec));
+
+        /* Split the access at the page boundary. */
+        part1 = PAGE_SIZE - offset;
+        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
+                             pfec, hvmemul_ctxt);
+        return rc;
+
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
+        return X86EMUL_RETRY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
+static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
+                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    pagefault_info_t pfinfo;
+    int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+
+    switch ( rc )
+    {
+        unsigned int offset, part1;
+
+    case HVMTRANS_okay:
+        return X86EMUL_OKAY;
+
+    case HVMTRANS_bad_linear_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+
+    case HVMTRANS_bad_gfn_to_mfn:
+        offset = addr & ~PAGE_MASK;
+        if ( offset + bytes <= PAGE_SIZE )
+            return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
+                                             hvmemul_ctxt,
+                                             known_glfn(addr, bytes, pfec));
+
+        /* Split the access at the page boundary. */
+        part1 = PAGE_SIZE - offset;
+        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
+                              pfec, hvmemul_ctxt);
+        return rc;
+
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
+        return X86EMUL_RETRY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
@@ -1049,11 +1153,8 @@ static int __hvmemul_read(
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
-    struct vcpu *curr = current;
-    pagefault_info_t pfinfo;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
     int rc;
 
     if ( is_x86_system_segment(seg) )
@@ -1067,34 +1168,8 @@ static int __hvmemul_read(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
-    if ( ((access_type != hvm_access_insn_fetch
-           ? vio->mmio_access.read_access
-           : vio->mmio_access.insn_fetch)) &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) )
-        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
-
-    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
-
-    switch ( rc )
-    {
-    case HVMTRANS_okay:
-        break;
-    case HVMTRANS_bad_linear_to_gfn:
-        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
-        return X86EMUL_EXCEPTION;
-    case HVMTRANS_bad_gfn_to_mfn:
-        if ( access_type == hvm_access_insn_fetch )
-            return X86EMUL_UNHANDLEABLE;
-
-        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
-    case HVMTRANS_gfn_paged_out:
-    case HVMTRANS_gfn_shared:
-        return X86EMUL_RETRY;
-    default:
-        return X86EMUL_UNHANDLEABLE;
-    }
-
-    return X86EMUL_OKAY;
+
+    return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
 }
 
 static int hvmemul_read(
@@ -1171,12 +1246,10 @@ static int hvmemul_write(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    struct vcpu *curr = current;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
     int rc;
-    void *mapping;
+    void *mapping = NULL;
 
     if ( is_x86_system_segment(seg) )
         pfec |= PFEC_implicit;
@@ -1188,16 +1261,15 @@ static int hvmemul_write(
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
 
-    if ( vio->mmio_access.write_access &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) )
-        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
-
-    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
-    if ( IS_ERR(mapping) )
-        return ~PTR_ERR(mapping);
+    if ( !known_glfn(addr, bytes, pfec) )
+    {
+        mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
+        if ( IS_ERR(mapping) )
+            return ~PTR_ERR(mapping);
+    }
 
     if ( !mapping )
-        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
+        return linear_write(addr, bytes, p_data, pfec, hvmemul_ctxt);
 
     memcpy(mapping, p_data, bytes);
@@ -1218,7 +1290,6 @@ static int hvmemul_rmw(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
     int rc;
     void *mapping;
@@ -1244,18 +1315,14 @@ static int hvmemul_rmw(
     else
     {
         unsigned long data = 0;
-        bool known_gpfn = vio->mmio_access.write_access &&
-                          vio->mmio_gla == (addr & PAGE_MASK);
 
         if ( bytes > sizeof(data) )
            return X86EMUL_UNHANDLEABLE;
-        rc = hvmemul_linear_mmio_read(addr, bytes, &data, pfec, hvmemul_ctxt,
-                                      known_gpfn);
+        rc = linear_read(addr, bytes, &data, pfec, hvmemul_ctxt);
         if ( rc == X86EMUL_OKAY )
             rc = x86_emul_rmw(&data, bytes, eflags, state, ctxt);
         if ( rc == X86EMUL_OKAY )
-            rc = hvmemul_linear_mmio_write(addr, bytes, &data, pfec,
-                                           hvmemul_ctxt, known_gpfn);
+            rc = linear_write(addr, bytes, &data, pfec, hvmemul_ctxt);
     }
 
     return rc;