|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH RFC V11 3/5] xen, libxc: Force-enable relevant MSR events
Vmx_disable_intercept_for_msr() will now refuse to disable interception of
MSRs needed for memory introspection. It is not possible to gate this on
mem_access being active for the domain, since by the time mem_access does
become active the interception for the interesting MSRs has already been
disabled (vmx_disable_intercept_for_msr() runs very early on).
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Changes since V8:
- Renamed vmx_msrs_exit_array to vmx_introspection_force_enabled_msrs.
Changes since V7:
- Reversed if conditions (cheapest one first).
- Combined two if statements.
- Moved "bool_t introspection_enabled;" to the bool_t group above.
- Renamed msrs_exit_array to vmx_msrs_exit_array and made it
"extern" in the header.
Changes since V6:
- Moved the array of interesting MSRs to common header.
- Minor code cleanup.
Changes since V5:
- Added xc_mem_access_enable_introspection() to libxc, which has
the same parameters and semantics as xc_mem_access_enable(),
but it additionally sets d->arch.hvm_domain.introspection_enabled
and enables relevant MSR interception.
- Renamed vmx_enable_intro_msr_interception() to
vmx_enable_msr_exit_interception().
- Simplified vmx_enable_msr_exit_interception() (const array of MSRs).
Changes since V3:
- Removed log line stating that MSR interception cannot be disabled.
- Removed superfluous #include <asm/hvm/vmx/vmcs.h>.
- Moved VMX-specific code to vmx.c (as a new hvm_funcs member).
Changes since V2:
- Split a log line differently to keep it grepable.
- Interception for relevant MSRs will be disabled only if
mem_access is not enabled.
- Since they end up being disabled early on (when mem_access
is not enabled yet), re-enable interception when mem_access
becomes active.
Changes since V1:
- Replaced printk() with gdprintk(XENLOG_DEBUG, ...).
---
xen/arch/x86/hvm/hvm.c | 33 +++++++++++++++++++++++
xen/arch/x86/mm/p2m.c | 57 ++++++++++++++++++++++++++++++++++++++++
xen/include/public/mem_event.h | 39 +++++++++++++++++++++++++++
3 files changed, 129 insertions(+)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 83e6fae..5761ff9 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -6149,6 +6149,38 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
return rc;
}
+static void hvm_mem_event_fill_regs(mem_event_request_t *req)
+{
+ const struct cpu_user_regs *regs = guest_cpu_user_regs();
+ const struct vcpu *curr = current;
+
+ req->x86_regs.rax = regs->eax;
+ req->x86_regs.rcx = regs->ecx;
+ req->x86_regs.rdx = regs->edx;
+ req->x86_regs.rbx = regs->ebx;
+ req->x86_regs.rsp = regs->esp;
+ req->x86_regs.rbp = regs->ebp;
+ req->x86_regs.rsi = regs->esi;
+ req->x86_regs.rdi = regs->edi;
+
+ req->x86_regs.r8 = regs->r8;
+ req->x86_regs.r9 = regs->r9;
+ req->x86_regs.r10 = regs->r10;
+ req->x86_regs.r11 = regs->r11;
+ req->x86_regs.r12 = regs->r12;
+ req->x86_regs.r13 = regs->r13;
+ req->x86_regs.r14 = regs->r14;
+ req->x86_regs.r15 = regs->r15;
+
+ req->x86_regs.rflags = regs->eflags;
+ req->x86_regs.rip = regs->eip;
+
+ req->x86_regs.msr_efer = curr->arch.hvm_vcpu.guest_efer;
+ req->x86_regs.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
+ req->x86_regs.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
+ req->x86_regs.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
+}
+
static int hvm_memory_event_traps(long p, uint32_t reason,
unsigned long value, unsigned long old,
bool_t gla_valid, unsigned long gla)
@@ -6193,6 +6225,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
req.gla = old;
}
+ hvm_mem_event_fill_regs(&req);
mem_event_put_request(d, &d->mem_event->access, &req);
return 1;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c2e89e1..6ed4109 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1323,6 +1323,61 @@ void p2m_mem_paging_resume(struct domain *d)
}
}
+static void p2m_mem_event_fill_regs(mem_event_request_t *req)
+{
+ const struct cpu_user_regs *regs = guest_cpu_user_regs();
+ struct segment_register seg;
+ struct hvm_hw_cpu ctxt;
+ struct vcpu *curr = current;
+
+ /* Architecture-specific vmcs/vmcb bits */
+ hvm_funcs.save_cpu_ctxt(curr, &ctxt);
+
+ req->x86_regs.rax = regs->eax;
+ req->x86_regs.rcx = regs->ecx;
+ req->x86_regs.rdx = regs->edx;
+ req->x86_regs.rbx = regs->ebx;
+ req->x86_regs.rsp = regs->esp;
+ req->x86_regs.rbp = regs->ebp;
+ req->x86_regs.rsi = regs->esi;
+ req->x86_regs.rdi = regs->edi;
+
+ req->x86_regs.r8 = regs->r8;
+ req->x86_regs.r9 = regs->r9;
+ req->x86_regs.r10 = regs->r10;
+ req->x86_regs.r11 = regs->r11;
+ req->x86_regs.r12 = regs->r12;
+ req->x86_regs.r13 = regs->r13;
+ req->x86_regs.r14 = regs->r14;
+ req->x86_regs.r15 = regs->r15;
+
+ req->x86_regs.rflags = regs->eflags;
+ req->x86_regs.rip = regs->eip;
+
+ req->x86_regs.dr7 = curr->arch.debugreg[7];
+ req->x86_regs.cr0 = ctxt.cr0;
+ req->x86_regs.cr2 = ctxt.cr2;
+ req->x86_regs.cr3 = ctxt.cr3;
+ req->x86_regs.cr4 = ctxt.cr4;
+
+ req->x86_regs.sysenter_cs = ctxt.sysenter_cs;
+ req->x86_regs.sysenter_esp = ctxt.sysenter_esp;
+ req->x86_regs.sysenter_eip = ctxt.sysenter_eip;
+
+ req->x86_regs.msr_efer = ctxt.msr_efer;
+ req->x86_regs.msr_star = ctxt.msr_star;
+ req->x86_regs.msr_lstar = ctxt.msr_lstar;
+
+ hvm_get_segment_register(curr, x86_seg_fs, &seg);
+ req->x86_regs.fs_base = seg.base;
+
+ hvm_get_segment_register(curr, x86_seg_gs, &seg);
+ req->x86_regs.gs_base = seg.base;
+
+ hvm_get_segment_register(curr, x86_seg_cs, &seg);
+ req->x86_regs.cs_arbytes = seg.attr.bytes;
+}
+
bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
struct npfec npfec,
mem_event_request_t **req_ptr)
@@ -1413,6 +1468,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
req->access_w = npfec.write_access;
req->access_x = npfec.insn_fetch;
req->vcpu_id = v->vcpu_id;
+
+ p2m_mem_event_fill_regs(req);
}
/* Pause the current VCPU */
diff --git a/xen/include/public/mem_event.h b/xen/include/public/mem_event.h
index fc12697..d3dd9c6 100644
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -48,6 +48,44 @@
#define MEM_EVENT_REASON_MSR 7 /* MSR was hit: gfn is MSR value,
gla is MSR address;
does NOT honour
HVMPME_onchangeonly */
+/* Using a custom struct (not hvm_hw_cpu) so as to not fill
+ * the mem_event ring buffer too quickly. */
+struct mem_event_regs_x86 {
+ uint64_t rax;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rbx;
+ uint64_t rsp;
+ uint64_t rbp;
+ uint64_t rsi;
+ uint64_t rdi;
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ uint64_t rflags;
+ uint64_t dr7;
+ uint64_t rip;
+ uint64_t cr0;
+ uint64_t cr2;
+ uint64_t cr3;
+ uint64_t cr4;
+ uint64_t sysenter_cs;
+ uint64_t sysenter_esp;
+ uint64_t sysenter_eip;
+ uint64_t msr_efer;
+ uint64_t msr_star;
+ uint64_t msr_lstar;
+ uint64_t fs_base;
+ uint64_t gs_base;
+ uint32_t cs_arbytes;
+ uint32_t _pad;
+};
+
typedef struct mem_event_st {
uint32_t flags;
uint32_t vcpu_id;
@@ -67,6 +105,7 @@ typedef struct mem_event_st {
uint16_t available:10;
uint16_t reason;
+ struct mem_event_regs_x86 x86_regs;
} mem_event_request_t, mem_event_response_t;
DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t);
--
1.7.9.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |