[Xen-devel] [PATCH v3 1/2] x86/mem_event: Deliver gla fault EPT violation information
On Intel EPT, the exit qualification generated by a violation also includes a
bit (EPT_GLA_FAULT) which conveys the following information:

"Set if the access causing the EPT violation is to a guest-physical address
that is the translation of a linear address. Clear if the access causing the
EPT violation is to a paging-structure entry as part of a page walk or the
update of an accessed or dirty bit."

For more information see Table 27-7 in the Intel SDM.

This patch extends the mem_event system to deliver this extra information,
which is useful for determining whether a violation was triggered by an
ordinary guest access or by the hardware page walker.
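To illustrate how a ring consumer could use the two new bits, here is a
minimal, hypothetical sketch (not part of this patch; classify_violation and
the log messages are made-up names for the example, and it assumes the public
mem_event definitions are available via xenctrl.h):

/* Hypothetical consumer-side sketch: classify a mem_event access
 * violation request using the new fault_in_gpt/fault_gla bits. */
#include <inttypes.h>
#include <stdio.h>
#include <xenctrl.h>

static void classify_violation(const mem_event_request_t *req)
{
    if ( !req->gla_valid )
    {
        /* No guest linear address available (e.g. an AMD NPF). */
        printf("gfn 0x%" PRIx64 ": no linear address information\n",
               req->gfn);
        return;
    }

    if ( req->fault_gla )
        /* The access targeted the translation of a linear address,
         * i.e. an ordinary guest data or instruction access. */
        printf("gla 0x%" PRIx64 ": direct access fault\n", req->gla);
    else if ( req->fault_in_gpt )
        /* The access hit a guest paging-structure entry during a page
         * walk or an accessed/dirty bit update. */
        printf("gla 0x%" PRIx64 ": fault during guest page-table walk\n",
               req->gla);
}

On Intel hardware exactly one of the two bits is set whenever gla_valid is
set, per the vmx.c hunk below; on SVM both bits are simply delivered as 0.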
v3: Style fixes.
v2: Split gla_fault into fault_in_gpt and fault_gla to be more compatible with
the AMD implementation.
Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c         |  8 ++++++--
 xen/arch/x86/hvm/svm/svm.c     |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c     | 11 ++++++++++-
 xen/arch/x86/mm/p2m.c          |  5 ++++-
 xen/include/asm-x86/hvm/hvm.h  |  5 ++++-
 xen/include/asm-x86/p2m.h      |  3 ++-
 xen/include/public/mem_event.h |  4 +++-
 7 files changed, 30 insertions(+), 8 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e834406..15bd01f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2725,6 +2725,8 @@ void hvm_inject_page_fault(int errcode, unsigned long cr2)
 int hvm_hap_nested_page_fault(paddr_t gpa,
                               bool_t gla_valid,
                               unsigned long gla,
+                              bool_t fault_in_gpt,
+                              bool_t fault_gla,
                               bool_t access_r,
                               bool_t access_w,
                               bool_t access_x)
@@ -2832,8 +2834,10 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
 
         if ( violation )
         {
-            if ( p2m_mem_access_check(gpa, gla_valid, gla, access_r,
-                                      access_w, access_x, &req_ptr) )
+            if ( p2m_mem_access_check(gpa, gla_valid, gla,
+                                      fault_in_gpt, fault_gla,
+                                      access_r, access_w, access_x,
+                                      &req_ptr) )
             {
                 fall_through = 1;
             } else {
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 76616ac..9e35e7a 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1403,7 +1403,7 @@ static void svm_do_nested_pgfault(struct vcpu *v,
     p2m_access_t p2ma;
     struct p2m_domain *p2m = NULL;
 
-    ret = hvm_hap_nested_page_fault(gpa, 0, ~0ul,
+    ret = hvm_hap_nested_page_fault(gpa, 0, ~0ul, 0, 0,
                                     1, /* All NPFs count as reads */
                                     npfec & PFEC_write_access,
                                     npfec & PFEC_insn_fetch);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2caa04a..a7a0396 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2353,6 +2353,7 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
     p2m_type_t p2mt;
     int ret;
     struct domain *d = current->domain;
+    bool_t fault_in_gpt, fault_gla;
 
     if ( tb_init_done )
     {
@@ -2371,11 +2372,19 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
     }
 
     if ( qualification & EPT_GLA_VALID )
+    {
         __vmread(GUEST_LINEAR_ADDRESS, &gla);
+        fault_gla = !!(qualification & EPT_GLA_FAULT);
+        fault_in_gpt = !fault_gla;
+    }
     else
+    {
         gla = ~0ull;
+        fault_gla = fault_in_gpt = 0;
+    }
     ret = hvm_hap_nested_page_fault(gpa,
-                                    !!(qualification & EPT_GLA_VALID), gla,
+                                    !!(qualification & EPT_GLA_VALID),
+                                    gla, fault_in_gpt, fault_gla,
                                     !!(qualification & EPT_READ_VIOLATION),
                                     !!(qualification & EPT_WRITE_VIOLATION),
                                     !!(qualification & EPT_EXEC_VIOLATION));
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index bca9f0f..132f0d2 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1323,7 +1323,8 @@ void p2m_mem_paging_resume(struct domain *d)
     }
 }
 
-bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
+bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
+                            bool_t fault_in_gpt, bool_t fault_gla,
                             bool_t access_r, bool_t access_w, bool_t access_x,
                             mem_event_request_t **req_ptr)
 {
@@ -1405,6 +1406,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
         req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
         req->gla_valid = gla_valid;
         req->gla = gla;
+        req->fault_in_gpt = fault_in_gpt;
+        req->fault_gla = fault_gla;
         req->access_r = access_r;
         req->access_w = access_w;
         req->access_x = access_x;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0ebd478..de755b6 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -456,7 +456,10 @@ static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
 }
 
 int hvm_hap_nested_page_fault(paddr_t gpa,
-                              bool_t gla_valid, unsigned long gla,
+                              bool_t gla_valid,
+                              unsigned long gla,
+                              bool_t fault_in_gpt,
+                              bool_t fault_gla,
                               bool_t access_r,
                               bool_t access_w,
                               bool_t access_x);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 0ddbadb..59803c5 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -597,7 +597,8 @@ void p2m_mem_paging_resume(struct domain *d);
  * been promoted with no underlying vcpu pause. If the req_ptr has been populated,
  * then the caller must put the event in the ring (once having released get_gfn*
  * locks -- caller must also xfree the request. */
-bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
+bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
+                            bool_t fault_in_gpt, bool_t fault_gla,
                             bool_t access_r, bool_t access_w, bool_t access_x,
                             mem_event_request_t **req_ptr);
 /* Resumes the running of the VCPU, restarting the last instruction */
diff --git a/xen/include/public/mem_event.h b/xen/include/public/mem_event.h
index 3831b41..1ba6863 100644
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -62,7 +62,9 @@ typedef struct mem_event_st {
     uint16_t access_w:1;
     uint16_t access_x:1;
     uint16_t gla_valid:1;
-    uint16_t available:12;
+    uint16_t fault_in_gpt:1;
+    uint16_t fault_gla:1;
+    uint16_t available:10;
 
     uint16_t reason;
 } mem_event_request_t, mem_event_response_t;
--
2.0.1