|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] mem_access: sanitize code around sending vm_event request
commit 3adef8e3270f5a524dc31c90db6ca389df61e1f0
Author: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
AuthorDate: Tue Sep 6 10:17:46 2016 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Sep 6 10:17:46 2016 +0200
mem_access: sanitize code around sending vm_event request
The two functions monitor_traps and mem_access_send_req duplicate some of
the
same functionality. The mem_access_send_req however leaves a lot of the
standard vm_event fields to be filled by other functions.
Remove mem_access_send_req() completely, making use of monitor_traps() to
put
requests into the monitor ring. This in turn causes some cleanup around the
old callsites of mem_access_send_req(). We also update monitor_traps to now
include setting the common vcpu_id field so that all other call-sites can
omit
this step.
Finally, this change identifies that errors from mem_access_send_req() were
never checked. As errors constitute a problem with the monitor ring,
crashing the domain is the most appropriate action to take.
Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
xen/arch/arm/p2m.c | 15 ++++-----------
xen/arch/x86/hvm/hvm.c | 16 ++++++++++------
xen/arch/x86/hvm/monitor.c | 5 -----
xen/arch/x86/mm/p2m.c | 24 +++---------------------
xen/common/mem_access.c | 11 -----------
xen/common/monitor.c | 2 ++
xen/include/asm-x86/p2m.h | 13 ++++++++-----
xen/include/xen/mem_access.h | 7 -------
8 files changed, 27 insertions(+), 66 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index d60fbbf..b648a9d 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -5,7 +5,7 @@
#include <xen/domain_page.h>
#include <xen/bitops.h>
#include <xen/vm_event.h>
-#include <xen/mem_access.h>
+#include <xen/monitor.h>
#include <xen/iocap.h>
#include <public/vm_event.h>
#include <asm/flushtlb.h>
@@ -1748,10 +1748,6 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla,
const struct npfec npfec)
{
req->reason = VM_EVENT_REASON_MEM_ACCESS;
- /* Pause the current VCPU */
- if ( xma != XENMEM_access_n2rwx )
- req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
-
/* Send request to mem access subscriber */
req->u.mem_access.gfn = gpa >> PAGE_SHIFT;
req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
@@ -1768,16 +1764,13 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla,
const struct npfec npfec)
req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0;
req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0;
req->u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0;
- req->vcpu_id = v->vcpu_id;
- mem_access_send_req(v->domain, req);
+ if ( monitor_traps(v, (xma != XENMEM_access_n2rwx), req) < 0 )
+ domain_crash(v->domain);
+
xfree(req);
}
- /* Pause the current VCPU */
- if ( xma != XENMEM_access_n2rwx )
- vm_event_vcpu_pause(v);
-
return false;
}
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2c89984..c0c270a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1707,7 +1707,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long
gla,
int rc, fall_through = 0, paged = 0;
int sharing_enomem = 0;
vm_event_request_t *req_ptr = NULL;
- bool_t ap2m_active;
+ bool_t ap2m_active, sync = 0;
/* On Nested Virtualization, walk the guest page table.
* If this succeeds, all is fine.
@@ -1846,11 +1846,13 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned
long gla,
}
}
- if ( p2m_mem_access_check(gpa, gla, npfec, &req_ptr) )
- {
+ sync = p2m_mem_access_check(gpa, gla, npfec, &req_ptr);
+
+ if ( !sync )
fall_through = 1;
- } else {
- /* Rights not promoted, vcpu paused, work here is done */
+ else
+ {
+ /* Rights not promoted (aka. sync event), work here is done */
rc = 1;
goto out_put_gfn;
}
@@ -1956,7 +1958,9 @@ out:
}
if ( req_ptr )
{
- mem_access_send_req(currd, req_ptr);
+ if ( monitor_traps(curr, sync, req_ptr) < 0 )
+ rc = 0;
+
xfree(req_ptr);
}
return rc;
diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c
index 53ab804..401a8c6 100644
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -44,7 +44,6 @@ bool_t hvm_monitor_cr(unsigned int index, unsigned long
value, unsigned long old
vm_event_request_t req = {
.reason = VM_EVENT_REASON_WRITE_CTRLREG,
- .vcpu_id = curr->vcpu_id,
.u.write_ctrlreg.index = index,
.u.write_ctrlreg.new_value = value,
.u.write_ctrlreg.old_value = old
@@ -65,7 +64,6 @@ void hvm_monitor_msr(unsigned int msr, uint64_t value)
{
vm_event_request_t req = {
.reason = VM_EVENT_REASON_MOV_TO_MSR,
- .vcpu_id = curr->vcpu_id,
.u.mov_to_msr.msr = msr,
.u.mov_to_msr.value = value,
};
@@ -131,8 +129,6 @@ int hvm_monitor_debug(unsigned long rip, enum
hvm_monitor_debug_type type,
return -EOPNOTSUPP;
}
- req.vcpu_id = curr->vcpu_id;
-
return monitor_traps(curr, sync, &req);
}
@@ -147,7 +143,6 @@ int hvm_monitor_cpuid(unsigned long insn_length, unsigned
int leaf,
return 0;
req.reason = VM_EVENT_REASON_CPUID;
- req.vcpu_id = curr->vcpu_id;
req.u.cpuid.insn_length = insn_length;
req.u.cpuid.leaf = leaf;
req.u.cpuid.subleaf = subleaf;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 812dbf6..27f9d26 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1728,13 +1728,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long
gla,
if ( req )
{
*req_ptr = req;
- req->reason = VM_EVENT_REASON_MEM_ACCESS;
-
- /* Pause the current VCPU */
- if ( p2ma != p2m_access_n2rwx )
- req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
- /* Send request to mem event */
+ req->reason = VM_EVENT_REASON_MEM_ACCESS;
req->u.mem_access.gfn = gfn;
req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
if ( npfec.gla_valid )
@@ -1750,23 +1745,10 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long
gla,
req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0;
req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0;
req->u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0;
- req->vcpu_id = v->vcpu_id;
-
- vm_event_fill_regs(req);
-
- if ( altp2m_active(v->domain) )
- {
- req->flags |= VM_EVENT_FLAG_ALTERNATE_P2M;
- req->altp2m_idx = vcpu_altp2m(v).p2midx;
- }
}
- /* Pause the current VCPU */
- if ( p2ma != p2m_access_n2rwx )
- vm_event_vcpu_pause(v);
-
- /* VCPU may be paused, return whether we promoted automatically */
- return (p2ma == p2m_access_n2rwx);
+ /* Return whether vCPU pause is required (aka. sync event) */
+ return (p2ma != p2m_access_n2rwx);
}
static inline
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index b4033f0..82f4bad 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -108,17 +108,6 @@ int mem_access_memop(unsigned long cmd,
return rc;
}
-int mem_access_send_req(struct domain *d, vm_event_request_t *req)
-{
- int rc = vm_event_claim_slot(d, &d->vm_event->monitor);
- if ( rc < 0 )
- return rc;
-
- vm_event_put_request(d, &d->vm_event->monitor, req);
-
- return 0;
-}
-
/*
* Local variables:
* mode: C
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index c73d1d5..451f42f 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -107,6 +107,8 @@ int monitor_traps(struct vcpu *v, bool_t sync,
vm_event_request_t *req)
return rc;
};
+ req->vcpu_id = v->vcpu_id;
+
if ( sync )
{
req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d5fd546..9fc9ead 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -663,11 +663,14 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long
gfn, uint64_t buffer);
/* Resume normal operation (in case a domain was paused) */
void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp);
-/* Send mem event based on the access (gla is -1ull if not available). Handles
- * the rw2rx conversion. Boolean return value indicates if access rights have
- * been promoted with no underlying vcpu pause. If the req_ptr has been
populated,
- * then the caller must put the event in the ring (once having released
get_gfn*
- * locks -- caller must also xfree the request. */
+/*
+ * Setup vm_event request based on the access (gla is -1ull if not available).
+ * Handles the rw2rx conversion. Boolean return value indicates if event type
is synchronous (aka. requires vCPU pause). If the req_ptr has been populated,
+ * then the caller should use monitor_traps to send the event on the MONITOR
+ * ring. Once having released get_gfn* locks caller must also xfree the
+ * request.
+ */
bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
struct npfec npfec,
vm_event_request_t **req_ptr);
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index 272f1e4..3d054e0 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -29,7 +29,6 @@
int mem_access_memop(unsigned long cmd,
XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
-int mem_access_send_req(struct domain *d, vm_event_request_t *req);
static inline
void mem_access_resume(struct vcpu *v, vm_event_response_t *rsp)
@@ -47,12 +46,6 @@ int mem_access_memop(unsigned long cmd,
}
static inline
-int mem_access_send_req(struct domain *d, vm_event_request_t *req)
-{
- return -ENOSYS;
-}
-
-static inline
void mem_access_resume(struct vcpu *vcpu, vm_event_response_t *rsp)
{
/* Nothing to do. */
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |