[Xen-devel] [PATCH 2/5] xen/vm_access: Support for memory-content hiding
This patch adds support for memory-content hiding by modifying the
value returned by emulated instructions that read certain memory
addresses containing sensitive data. This only applies to cases
where MEM_ACCESS_EMULATE or MEM_ACCESS_EMULATE_NOWRITE has been set
in a vm_event response.
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c        | 155 ++++++++++++++++++++++++++++++-----
 xen/arch/x86/mm/p2m.c             |  15 +++-
 xen/include/asm-x86/domain.h      |   2 +
 xen/include/asm-x86/hvm/emulate.h |  10 ++-
 xen/include/public/vm_event.h     |  20 ++++-
 5 files changed, 178 insertions(+), 24 deletions(-)
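
(Illustrative note, not part of the patch itself: below is a minimal
sketch of how a vm_event consumer might use the new flag when answering
a mem_access event. Only the structures and flags introduced by this
patch are taken as given; the ring-handling code around this and the
helper name are assumed to exist in the monitoring application.)

    #include <string.h>
    #include <xen/vm_event.h> /* public header extended by this patch */

    static void answer_with_hidden_content(const vm_event_request_t *req,
                                           vm_event_response_t *rsp)
    {
        /* The bytes the guest should observe instead of the real memory. */
        static const uint8_t scrubbed[8] = { 0 };

        rsp->version = VM_EVENT_INTERFACE_VERSION;
        rsp->vcpu_id = req->vcpu_id;

        /* Emulate the faulting instruction, but feed it our data. */
        rsp->flags = MEM_ACCESS_EMULATE | MEM_ACCESS_SET_EMUL_READ_DATA;

        rsp->emul_read_data.size = sizeof(scrubbed);
        memcpy(rsp->emul_read_data.data, scrubbed, sizeof(scrubbed));
    }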
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index ac9c9d6..0058b15 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -578,6 +578,25 @@ static int hvmemul_read(
container_of(ctxt, struct hvm_emulate_ctxt, ctxt));
}
+static int hvmemul_read_set_context(
+ enum x86_segment seg,
+ unsigned long offset,
+ void *p_data,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct vcpu *curr = current;
+ unsigned int len =
+ (bytes > curr->arch.vm_event.emul_read_data.size ?
+ curr->arch.vm_event.emul_read_data.size : bytes);
+
+ if ( len )
+ memcpy(p_data, curr->arch.vm_event.emul_read_data.data,
+ len);
+
+ return X86EMUL_OKAY;
+}
+
static int hvmemul_insn_fetch(
enum x86_segment seg,
unsigned long offset,
@@ -891,14 +910,15 @@ static int hvmemul_rep_outs(
!!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
}
-static int hvmemul_rep_movs(
+static int _hvmemul_rep_movs(
enum x86_segment src_seg,
unsigned long src_offset,
enum x86_segment dst_seg,
unsigned long dst_offset,
unsigned int bytes_per_rep,
unsigned long *reps,
- struct x86_emulate_ctxt *ctxt)
+ struct x86_emulate_ctxt *ctxt,
+ bool_t set_context)
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
@@ -970,20 +990,38 @@ static int hvmemul_rep_movs(
if ( df )
dgpa -= bytes - bytes_per_rep;
- /* Allocate temporary buffer. Fall back to slow emulation if this fails. */
- buf = xmalloc_bytes(bytes);
- if ( buf == NULL )
- return X86EMUL_UNHANDLEABLE;
+ if ( unlikely(set_context) )
+ {
+ struct vcpu *curr = current;
- /*
- * We do a modicum of checking here, just for paranoia's sake and to
- * definitely avoid copying an unitialised buffer into guest address space.
- */
- rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
- if ( rc == HVMCOPY_okay )
- rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
+ bytes = bytes < curr->arch.vm_event.emul_read_data.size ?
+ bytes : curr->arch.vm_event.emul_read_data.size;
- xfree(buf);
+ rc = hvm_copy_to_guest_phys(dgpa,
+ curr->arch.vm_event.emul_read_data.data,
+ bytes);
+ }
+ else
+ {
+ /*
+ * Allocate temporary buffer. Fall back to slow emulation if this
+ * fails.
+ */
+ buf = xmalloc_bytes(bytes);
+ if ( buf == NULL )
+ return X86EMUL_UNHANDLEABLE;
+
+ /*
+ * We do a modicum of checking here, just for paranoia's sake and to
+ * definitely avoid copying an uninitialised buffer into guest address
+ * space.
+ */
+ rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
+ if ( rc == HVMCOPY_okay )
+ rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
+
+ xfree(buf);
+ }
if ( rc == HVMCOPY_gfn_paged_out )
return X86EMUL_RETRY;
@@ -1000,6 +1038,32 @@ static int hvmemul_rep_movs(
return X86EMUL_OKAY;
}
+static int hvmemul_rep_movs(
+ enum x86_segment src_seg,
+ unsigned long src_offset,
+ enum x86_segment dst_seg,
+ unsigned long dst_offset,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
+{
+ return _hvmemul_rep_movs(src_seg, src_offset, dst_seg, dst_offset,
+ bytes_per_rep, reps, ctxt, 0);
+}
+
+static int hvmemul_rep_movs_set_context(
+ enum x86_segment src_seg,
+ unsigned long src_offset,
+ enum x86_segment dst_seg,
+ unsigned long dst_offset,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
+{
+ return _hvmemul_rep_movs(src_seg, src_offset, dst_seg, dst_offset,
+ bytes_per_rep, reps, ctxt, 1);
+}
+
static int hvmemul_rep_stos(
void *p_data,
enum x86_segment seg,
@@ -1107,6 +1171,22 @@ static int hvmemul_rep_stos(
}
}
+static int hvmemul_rep_stos_set_context(
+ void *p_data,
+ enum x86_segment seg,
+ unsigned long offset,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct vcpu *curr = current;
+ unsigned long local_reps = 1;
+
+ return hvmemul_rep_stos(curr->arch.vm_event.emul_read_data.data, seg,
+ offset, curr->arch.vm_event.emul_read_data.size,
+ &local_reps, ctxt);
+}
+
static int hvmemul_read_segment(
enum x86_segment seg,
struct segment_register *reg,
@@ -1408,6 +1488,32 @@ static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
.invlpg = hvmemul_invlpg
};
+static const struct x86_emulate_ops hvm_emulate_ops_set_context = {
+ .read = hvmemul_read_set_context,
+ .insn_fetch = hvmemul_insn_fetch,
+ .write = hvmemul_write,
+ .cmpxchg = hvmemul_cmpxchg,
+ .rep_ins = hvmemul_rep_ins,
+ .rep_outs = hvmemul_rep_outs,
+ .rep_movs = hvmemul_rep_movs_set_context,
+ .rep_stos = hvmemul_rep_stos_set_context,
+ .read_segment = hvmemul_read_segment,
+ .write_segment = hvmemul_write_segment,
+ .read_io = hvmemul_read_io,
+ .write_io = hvmemul_write_io,
+ .read_cr = hvmemul_read_cr,
+ .write_cr = hvmemul_write_cr,
+ .read_msr = hvmemul_read_msr,
+ .write_msr = hvmemul_write_msr,
+ .wbinvd = hvmemul_wbinvd,
+ .cpuid = hvmemul_cpuid,
+ .inject_hw_exception = hvmemul_inject_hw_exception,
+ .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
+ .get_fpu = hvmemul_get_fpu,
+ .put_fpu = hvmemul_put_fpu,
+ .invlpg = hvmemul_invlpg
+};
+
static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
const struct x86_emulate_ops *ops)
{
@@ -1528,18 +1634,32 @@ int hvm_emulate_one_no_write(
return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops_no_write);
}
-void hvm_mem_access_emulate_one(bool_t nowrite, unsigned int trapnr,
+int hvm_emulate_one_set_context(
+ struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+ return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops_set_context);
+}
+
+void hvm_mem_access_emulate_one(enum emul_kind kind, unsigned int trapnr,
unsigned int errcode)
{
struct hvm_emulate_ctxt ctx = {{ 0 }};
- int rc;
+ int rc = X86EMUL_UNHANDLEABLE;
hvm_emulate_prepare(&ctx, guest_cpu_user_regs());
- if ( nowrite )
+ switch ( kind )
+ {
+ case EMUL_KIND_NOWRITE:
rc = hvm_emulate_one_no_write(&ctx);
- else
+ break;
+ case EMUL_KIND_NORMAL:
rc = hvm_emulate_one(&ctx);
+ break;
+ case EMUL_KIND_SET_CONTEXT:
+ rc = hvm_emulate_one_set_context(&ctx);
+ break;
+ }
switch ( rc )
{
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 1fd1194..0b2f9a6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1466,6 +1466,9 @@ void p2m_mem_access_emulate_check(struct vcpu *v,
}
v->arch.vm_event.emulate_flags = violation ? rsp->flags : 0;
+
+ if ( rsp->flags & MEM_ACCESS_SET_EMUL_READ_DATA )
+ v->arch.vm_event.emul_read_data = rsp->emul_read_data;
}
}
@@ -1552,9 +1555,15 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
if ( v->arch.vm_event.emulate_flags )
{
- hvm_mem_access_emulate_one((v->arch.vm_event.emulate_flags &
- MEM_ACCESS_EMULATE_NOWRITE) != 0,
- TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ enum emul_kind kind = EMUL_KIND_NORMAL;
+
+ if ( v->arch.vm_event.emulate_flags & MEM_ACCESS_SET_EMUL_READ_DATA )
+ kind = EMUL_KIND_SET_CONTEXT;
+ else if ( v->arch.vm_event.emulate_flags & MEM_ACCESS_EMULATE_NOWRITE )
+ kind = EMUL_KIND_NOWRITE;
+
+ hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
+ HVM_DELIVER_NO_ERROR_CODE);
v->arch.vm_event.emulate_flags = 0;
return 1;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 452a9b3..2b89182 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -10,6 +10,7 @@
#include <asm/mce.h>
#include <public/vcpu.h>
#include <public/hvm/hvm_info_table.h>
+#include <public/vm_event.h>
#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo)
#define is_pv_32bit_domain(d) ((d)->arch.is_32bit_pv)
@@ -512,6 +513,7 @@ struct arch_vcpu
uint32_t emulate_flags;
unsigned long gpa;
unsigned long eip;
+ struct vm_event_emul_read_data emul_read_data;
} vm_event;
};
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index b3971c8..65ccfd8 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -34,11 +34,19 @@ struct hvm_emulate_ctxt {
uint32_t intr_shadow;
};
+enum emul_kind {
+ EMUL_KIND_NORMAL,
+ EMUL_KIND_NOWRITE,
+ EMUL_KIND_SET_CONTEXT
+};
+
int hvm_emulate_one(
struct hvm_emulate_ctxt *hvmemul_ctxt);
int hvm_emulate_one_no_write(
struct hvm_emulate_ctxt *hvmemul_ctxt);
-void hvm_mem_access_emulate_one(bool_t nowrite,
+int hvm_emulate_one_set_context(
+ struct hvm_emulate_ctxt *hvmemul_ctxt);
+void hvm_mem_access_emulate_one(enum emul_kind kind,
unsigned int trapnr,
unsigned int errcode);
void hvm_emulate_prepare(
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 71fe9ba..bce3e3e 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -149,6 +149,13 @@ struct vm_event_regs_x86 {
* potentially having side effects (like memory mapped or port I/O) disabled.
*/
#define MEM_ACCESS_EMULATE_NOWRITE (1 << 7)
+/*
+ * Data is being sent back to the hypervisor in the event response, to be
+ * returned by the read function when emulating an instruction.
+ * This flag is only useful when combined with MEM_ACCESS_EMULATE or
+ * MEM_ACCESS_EMULATE_NOWRITE.
+ */
+#define MEM_ACCESS_SET_EMUL_READ_DATA (1 << 8)
struct vm_event_mem_access {
uint64_t gfn;
@@ -193,6 +200,11 @@ struct vm_event_xsetbv {
uint64_t value;
};
+struct vm_event_emul_read_data {
+ uint32_t size;
+ uint8_t data[164];
+};
+
typedef struct vm_event_st {
uint32_t version; /* VM_EVENT_INTERFACE_VERSION */
uint32_t flags; /* VM_EVENT_FLAG_* */
@@ -211,8 +223,12 @@ typedef struct vm_event_st {
} u;
union {
- struct vm_event_regs_x86 x86;
- } regs;
+ union {
+ struct vm_event_regs_x86 x86;
+ } regs;
+
+ struct vm_event_emul_read_data emul_read_data;
+ };
} vm_event_request_t, vm_event_response_t;
DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
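
(A second illustrative note, outside the patch: emul_read_data shares the
trailing anonymous union with the regs block, so a single vm_event carries
either register state or read data, not both. A consumer may want to clamp
its payload to the 164-byte capacity defined above; the helper below is
hypothetical.)

    #include <xen/vm_event.h>

    /* Bound a payload to what vm_event_emul_read_data.data can hold. */
    static inline uint32_t clamp_emul_read_size(uint32_t wanted)
    {
        const uint32_t cap =
            sizeof(((struct vm_event_emul_read_data *)0)->data);

        return wanted > cap ? cap : wanted;
    }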
--
1.7.9.5