
[Xen-devel] [PATCH RFC v3 2/2] x86/emulate: Send vm_event from emulate



This patch aims to have mem_access vm_events sent from the emulator.
This is useful in the case of page walks that have to emulate
instructions in access-denied pages.

We use hvmemul_map_linear_addr() to intercept read/write accesses and
hvmemul_insn_fetch() to intercept execute accesses.

First we try to send a vm_event; if the event is sent, emulation
returns X86EMUL_ACCESS_EXCEPTION. If the event is not sent, emulation
continues as before (the flow is sketched below).

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

---
Changes since V2:
        - Join "if" statements where possible
        - Correct indentation where needed
        - Remove rc initialization.
---
 xen/arch/x86/hvm/emulate.c             | 106 ++++++++++++++++++++++++-
 xen/arch/x86/hvm/vm_event.c            |   2 +-
 xen/arch/x86/mm/mem_access.c           |   3 +-
 xen/arch/x86/x86_emulate/x86_emulate.h |   1 +
 xen/include/asm-x86/hvm/emulate.h      |   4 +-
 5 files changed, 109 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index a766eecc8e..e4acbdf271 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -15,6 +15,7 @@
 #include <xen/paging.h>
 #include <xen/trace.h>
 #include <xen/vm_event.h>
+#include <xen/monitor.h>
 #include <asm/event.h>
 #include <asm/i387.h>
 #include <asm/xstate.h>
@@ -26,6 +27,7 @@
 #include <asm/hvm/support.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/vm_event.h>
+#include <asm/altp2m.h>
 
 static void hvmtrace_io_assist(const ioreq_t *p)
 {
@@ -530,6 +532,55 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
     return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
 }
 
+static bool hvmemul_send_vm_event(paddr_t gpa, unsigned long gla, gfn_t gfn,
+                                  uint32_t pfec, struct hvm_emulate_ctxt *ctxt)
+{
+    xenmem_access_t access;
+    vm_event_request_t req = {};
+
+    if ( !ctxt->send_event || !pfec ||
+         p2m_get_mem_access(current->domain, gfn, &access,
+                            altp2m_vcpu_idx(current)) != 0 )
+        return false;
+
+    switch ( access )
+    {
+    case XENMEM_access_x:
+    case XENMEM_access_rx:
+        if ( pfec & PFEC_write_access )
+            req.u.mem_access.flags = MEM_ACCESS_R | MEM_ACCESS_W;
+        break;
+
+    case XENMEM_access_w:
+    case XENMEM_access_rw:
+        if ( pfec & PFEC_insn_fetch )
+            req.u.mem_access.flags = MEM_ACCESS_X;
+        break;
+
+    case XENMEM_access_r:
+    case XENMEM_access_n:
+        if ( pfec & PFEC_write_access )
+            req.u.mem_access.flags |= MEM_ACCESS_R | MEM_ACCESS_W;
+        if ( pfec & PFEC_insn_fetch )
+            req.u.mem_access.flags |= MEM_ACCESS_X;
+        break;
+
+    default:
+        return false;
+    }
+
+    if ( !req.u.mem_access.flags )
+        return false; /* No violation. */
+
+    req.reason = VM_EVENT_REASON_MEM_ACCESS;
+    req.u.mem_access.gfn = gfn_x(gfn);
+    req.u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA | MEM_ACCESS_GLA_VALID;
+    req.u.mem_access.gla = gla;
+    req.u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
+
+    return monitor_traps(current, true, &req) >= 0;
+}
+
 /*
  * Convert addr from linear to physical form, valid over the range
  * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
@@ -636,6 +687,7 @@ static void *hvmemul_map_linear_addr(
     unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) -
         (linear >> PAGE_SHIFT) + 1;
     unsigned int i;
+    gfn_t gfn;
 
     /*
      * mfn points to the next free slot.  All used slots have a page reference
@@ -674,7 +726,7 @@ static void *hvmemul_map_linear_addr(
         ASSERT(mfn_x(*mfn) == 0);
 
         res = hvm_translate_get_page(curr, addr, true, pfec,
-                                     &pfinfo, &page, NULL, &p2mt);
+                                     &pfinfo, &page, &gfn, &p2mt);
 
         switch ( res )
         {
@@ -704,6 +756,23 @@ static void *hvmemul_map_linear_addr(
 
         if ( pfec & PFEC_write_access )
         {
+            unsigned long reps = 1;
+            struct hvm_emulate_ctxt old;
+            int rc;
+            paddr_t gpa = 0; /* zero => hvmemul_linear_to_phys() failed */
+
+            old = *hvmemul_ctxt;
+            rc = hvmemul_linear_to_phys(addr, &gpa, bytes, &reps,
+                                        pfec, hvmemul_ctxt);
+            if ( rc == X86EMUL_EXCEPTION )
+                *hvmemul_ctxt = old;
+
+            if ( hvmemul_send_vm_event(gpa, addr, gfn, pfec, hvmemul_ctxt) )
+            {
+                err = ERR_PTR(~X86EMUL_ACCESS_EXCEPTION);
+                goto out;
+            }
+
             if ( p2m_is_discard_write(p2mt) )
             {
                 err = ERR_PTR(~X86EMUL_OKAY);
@@ -1224,7 +1293,35 @@ int hvmemul_insn_fetch(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     /* Careful, as offset can wrap or truncate WRT insn_buf_eip. */
     uint8_t insn_off = offset - hvmemul_ctxt->insn_buf_eip;
+    paddr_t gpa = 0; /* zero => hvmemul_linear_to_phys() failed */
+    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
+    unsigned long addr, reps = 1;
+    int rc;
+    struct hvm_emulate_ctxt old;
+
+    rc = hvmemul_virtual_to_linear(seg, offset, bytes, &reps,
+                                   hvm_access_insn_fetch, hvmemul_ctxt, &addr);
+    if ( rc == X86EMUL_EXCEPTION )
+    {
+        x86_emul_reset_event(ctxt);
+        rc = X86EMUL_OKAY;
+    }
+
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
+        pfec |= PFEC_user_mode;
+
+    old = *hvmemul_ctxt;
+    rc = hvmemul_linear_to_phys(addr, &gpa, bytes, &reps,
+                                pfec, hvmemul_ctxt);
+    if ( rc == X86EMUL_EXCEPTION )
+    {
+        *hvmemul_ctxt = old;
+        rc = X86EMUL_OKAY;
+    }
 
+    if ( gpa && hvmemul_send_vm_event(gpa, addr, gaddr_to_gfn(gpa),
+                                      pfec, hvmemul_ctxt) )
+        return X86EMUL_ACCESS_EXCEPTION;
     /*
      * Fall back if requested bytes are not in the prefetch cache.
      * But always perform the (fake) read when bytes == 0.
@@ -1232,8 +1329,8 @@ int hvmemul_insn_fetch(
     if ( !bytes ||
          unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
     {
-        int rc = __hvmemul_read(seg, offset, p_data, bytes,
-                                hvm_access_insn_fetch, hvmemul_ctxt);
+        rc = __hvmemul_read(seg, offset, p_data, bytes,
+                            hvm_access_insn_fetch, hvmemul_ctxt);
 
         if ( rc == X86EMUL_OKAY && bytes )
         {
@@ -2492,12 +2589,13 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
 }
 
 void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
-    unsigned int errcode)
+    unsigned int errcode, bool send_event)
 {
     struct hvm_emulate_ctxt ctx = {{ 0 }};
     int rc;
 
     hvm_emulate_init_once(&ctx, NULL, guest_cpu_user_regs());
+    ctx.send_event = send_event;
 
     switch ( kind )
     {
diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c
index 0df8ab40e6..bdc65da3ed 100644
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ -87,7 +87,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
             kind = EMUL_KIND_SET_CONTEXT_INSN;
 
         hvm_emulate_one_vm_event(kind, TRAP_invalid_op,
-                                 X86_EVENT_NO_EC);
+                                 X86_EVENT_NO_EC, false);
 
         v->arch.vm_event->emulate_flags = 0;
     }
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 56c06a4fc6..536ad6367b 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -214,7 +214,8 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
          d->arch.monitor.inguest_pagefault_disabled &&
          npfec.kind != npfec_kind_with_gla ) /* don't send a mem_event */
     {
-        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op, X86_EVENT_NO_EC);
+        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op,
+                                 X86_EVENT_NO_EC, true);
 
         return true;
     }
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 55a9e0ed51..a9829913a4 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -162,6 +162,7 @@ struct x86_emul_fpu_aux {
 #define X86EMUL_UNRECOGNIZED   X86EMUL_UNIMPLEMENTED
  /* (cmpxchg accessor): CMPXCHG failed. */
 #define X86EMUL_CMPXCHG_FAILED 7
+#define X86EMUL_ACCESS_EXCEPTION 8
 
 /* FPU sub-types which may be requested via ->get_fpu(). */
 enum x86_emulate_fpu_type {
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 26a01e83a4..721e175b04 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -47,6 +47,7 @@ struct hvm_emulate_ctxt {
     uint32_t intr_shadow;
 
     bool_t set_context;
+    bool send_event;
 };
 
 enum emul_kind {
@@ -63,7 +64,8 @@ int hvm_emulate_one(
     struct hvm_emulate_ctxt *hvmemul_ctxt);
 void hvm_emulate_one_vm_event(enum emul_kind kind,
     unsigned int trapnr,
-    unsigned int errcode);
+    unsigned int errcode,
+    bool send_event);
 /* Must be called once to set up hvmemul state. */
 void hvm_emulate_init_once(
     struct hvm_emulate_ctxt *hvmemul_ctxt,
-- 
2.17.1

