[Xen-devel] [PATCH v4 1/3] x86: Consolidate boolean inputs in hvm and p2m into their own respective bitmaps.
This patch consolidates the boolean input parameters of
hvm_hap_nested_page_fault() and p2m_mem_access_check() into the bit-field
structures struct npf_info and struct p2m_access_check_info, and defines
their members accordingly.
Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 54 +++++++++++++++++++++++++------------------
xen/arch/x86/hvm/svm/svm.c | 14 ++++++-----
xen/arch/x86/hvm/vmx/vmx.c | 15 ++++++++----
xen/arch/x86/mm/p2m.c | 18 +++++++--------
xen/include/asm-x86/hvm/hvm.h | 19 +++++++++++----
xen/include/asm-x86/p2m.h | 20 +++++++++++++---
6 files changed, 89 insertions(+), 51 deletions(-)
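
For reviewers' reference, here is a minimal, self-contained sketch of the
consolidation pattern this patch applies. The handle_fault() function and
its caller are hypothetical stand-ins; only the bit-field layout mirrors
struct npf_info as introduced in asm-x86/hvm/hvm.h.

    #include <stdint.h>
    #include <stdio.h>

    /* Bit-field layout mirroring struct npf_info from this patch: each
     * former bool_t parameter becomes a one-bit member. */
    struct npf_info {
        uint8_t read_access:1;
        uint8_t write_access:1;
        uint8_t insn_fetch:1;
        uint8_t gla_valid:1;
        uint8_t have_extra_fault_info:1;
        uint8_t extra_fault_info:1;
    };

    /* Hypothetical consumer: previously this would have taken gla_valid,
     * access_r, access_w and access_x as separate bool_t parameters. */
    static void handle_fault(uint64_t gpa, unsigned long gla,
                             struct npf_info npfec)
    {
        if ( npfec.write_access )
            printf("write fault at gpa %#llx\n", (unsigned long long)gpa);
    }

    int main(void)
    {
        struct npf_info npfec = {0};

        npfec.read_access = 1;  /* e.g. all SVM NPFs count as reads */
        npfec.write_access = 1;

        handle_fault(0x1000, ~0ul, npfec);
        return 0;
    }

Passing the struct by value keeps the call sites compact and lets new flags
(such as have_extra_fault_info) be added later without touching every
caller's signature.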
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e834406..4c9d4d6 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2722,18 +2722,15 @@ void hvm_inject_page_fault(int errcode, unsigned long cr2)
hvm_inject_trap(&trap);
}
-int hvm_hap_nested_page_fault(paddr_t gpa,
- bool_t gla_valid,
- unsigned long gla,
- bool_t access_r,
- bool_t access_w,
- bool_t access_x)
+int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
+ struct npf_info npfec)
{
unsigned long gfn = gpa >> PAGE_SHIFT;
p2m_type_t p2mt;
p2m_access_t p2ma;
mfn_t mfn;
struct vcpu *v = current;
+ struct p2m_access_check_info p2mcheck = {0};
struct p2m_domain *p2m;
int rc, fall_through = 0, paged = 0;
int sharing_enomem = 0;
@@ -2756,8 +2753,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
* into l1 guest if not fixable. The algorithm is
* the same as for shadow paging.
*/
- rv = nestedhvm_hap_nested_page_fault(v, &gpa,
- access_r, access_w, access_x);
+
+ rv = nestedhvm_hap_nested_page_fault(v, &gpa,
+ npfec.read_access,
+ npfec.write_access,
+ npfec.insn_fetch);
switch (rv) {
case NESTEDHVM_PAGEFAULT_DONE:
case NESTEDHVM_PAGEFAULT_RETRY:
@@ -2793,38 +2793,39 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
p2m = p2m_get_hostp2m(v->domain);
mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
- P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL);
+ P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
+ NULL);
/* Check access permissions first, then handle faults */
if ( mfn_x(mfn) != INVALID_MFN )
{
- int violation = 0;
+ bool_t violation = 0;
/* If the access is against the permissions, then send to mem_event */
switch (p2ma)
{
case p2m_access_n:
case p2m_access_n2rwx:
default:
- violation = access_r || access_w || access_x;
+ violation = (npfec.read_access || npfec.write_access || npfec.insn_fetch);
break;
case p2m_access_r:
- violation = access_w || access_x;
+ violation = (npfec.write_access || npfec.insn_fetch);
break;
case p2m_access_w:
- violation = access_r || access_x;
+ violation = (npfec.read_access || npfec.insn_fetch);
break;
case p2m_access_x:
- violation = access_r || access_w;
+ violation = (npfec.read_access || npfec.write_access);
break;
case p2m_access_rx:
case p2m_access_rx2rw:
- violation = access_w;
+ violation = npfec.write_access;
break;
case p2m_access_wx:
- violation = access_r;
+ violation = npfec.read_access;
break;
case p2m_access_rw:
- violation = access_x;
+ violation = npfec.insn_fetch;
break;
case p2m_access_rwx:
break;
@@ -2832,8 +2833,15 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
if ( violation )
{
- if ( p2m_mem_access_check(gpa, gla_valid, gla, access_r,
- access_w, access_x, &req_ptr) )
+
+ p2mcheck.read_access = npfec.read_access;
+ p2mcheck.write_access = npfec.write_access;
+ p2mcheck.insn_fetch = npfec.insn_fetch;
+ p2mcheck.gla_valid = npfec.gla_valid;
+ p2mcheck.have_extra_fault_info = npfec.have_extra_fault_info;
+ p2mcheck.extra_fault_info = npfec.extra_fault_info;
+
+ if ( p2m_mem_access_check(gpa, gla, p2mcheck, &req_ptr) )
{
fall_through = 1;
} else {
@@ -2849,7 +2857,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
* to the mmio handler.
*/
if ( (p2mt == p2m_mmio_dm) ||
- (access_w && (p2mt == p2m_ram_ro)) )
+ (npfec.write_access && (p2mt == p2m_ram_ro)) )
{
put_gfn(p2m->domain, gfn);
@@ -2868,7 +2876,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
paged = 1;
/* Mem sharing: unshare the page and try again */
- if ( access_w && (p2mt == p2m_ram_shared) )
+ if ( npfec.write_access && (p2mt == p2m_ram_shared) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
sharing_enomem =
@@ -2885,7 +2893,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
* a large page, we do not change other pages type within that large
* page.
*/
- if ( access_w )
+ if ( npfec.write_access )
{
paging_mark_dirty(v->domain, mfn_x(mfn));
p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
@@ -2895,7 +2903,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
}
/* Shouldn't happen: Maybe the guest was writing to a r/o grant mapping? */
- if ( access_w && (p2mt == p2m_grant_map_ro) )
+ if ( npfec.write_access && (p2mt == p2m_grant_map_ro) )
{
gdprintk(XENLOG_WARNING,
"trying to write to read-only grant mapping\n");
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 76616ac..c41a8fe 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1394,7 +1394,7 @@ const struct hvm_function_table * __init start_svm(void)
}
static void svm_do_nested_pgfault(struct vcpu *v,
- struct cpu_user_regs *regs, uint32_t npfec, paddr_t gpa)
+ struct cpu_user_regs *regs, uint64_t pfec, paddr_t gpa)
{
int ret;
unsigned long gfn = gpa >> PAGE_SHIFT;
@@ -1403,10 +1403,12 @@ static void svm_do_nested_pgfault(struct vcpu *v,
p2m_access_t p2ma;
struct p2m_domain *p2m = NULL;
- ret = hvm_hap_nested_page_fault(gpa, 0, ~0ul,
- 1, /* All NPFs count as reads */
- npfec & PFEC_write_access,
- npfec & PFEC_insn_fetch);
+ struct npf_info npfec = {0};
+ npfec.read_access = 1; /* All NPFs count as reads */
+ npfec.write_access = !!(pfec & PFEC_write_access);
+ npfec.insn_fetch = !!(pfec & PFEC_insn_fetch);
+
+ ret = hvm_hap_nested_page_fault(gpa, ~0ul, npfec);
if ( tb_init_done )
{
@@ -1434,7 +1436,7 @@ static void svm_do_nested_pgfault(struct vcpu *v,
case -1:
ASSERT(nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v));
/* inject #VMEXIT(NPF) into guest. */
- nestedsvm_vmexit_defer(v, VMEXIT_NPF, npfec, gpa);
+ nestedsvm_vmexit_defer(v, VMEXIT_NPF, pfec, gpa);
return;
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2caa04a..907115b 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2353,6 +2353,7 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
p2m_type_t p2mt;
int ret;
struct domain *d = current->domain;
+ struct npf_info npfec = {0};
if ( tb_init_done )
{
@@ -2370,15 +2371,19 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
__trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
}
+ npfec.read_access = !!(qualification & EPT_READ_VIOLATION);
+ npfec.write_access = !!(qualification & EPT_WRITE_VIOLATION);
+ npfec.insn_fetch = !!(qualification & EPT_EXEC_VIOLATION);
+
if ( qualification & EPT_GLA_VALID )
+ {
__vmread(GUEST_LINEAR_ADDRESS, &gla);
+ npfec.gla_valid = 1;
+ }
else
gla = ~0ull;
- ret = hvm_hap_nested_page_fault(gpa,
- !!(qualification & EPT_GLA_VALID), gla,
- !!(qualification & EPT_READ_VIOLATION),
- !!(qualification & EPT_WRITE_VIOLATION),
- !!(qualification & EPT_EXEC_VIOLATION));
+
+ ret = hvm_hap_nested_page_fault(gpa, gla, npfec);
switch ( ret )
{
case 0: // Unhandled L1 EPT violation
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index bca9f0f..8104dbb 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1323,9 +1323,9 @@ void p2m_mem_paging_resume(struct domain *d)
}
}
-bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
- bool_t access_r, bool_t access_w, bool_t access_x,
- mem_event_request_t **req_ptr)
+bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+ struct p2m_access_check_info check,
+ mem_event_request_t **req_ptr)
{
struct vcpu *v = current;
unsigned long gfn = gpa >> PAGE_SHIFT;
@@ -1343,7 +1343,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
- if ( access_w && p2ma == p2m_access_rx2rw )
+ if ( check.write_access && p2ma == p2m_access_rx2rw )
{
rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
ASSERT(rc == 0);
@@ -1352,7 +1352,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
}
else if ( p2ma == p2m_access_n2rwx )
{
- ASSERT(access_w || access_r || access_x);
+ ASSERT(check.write_access || check.read_access || check.insn_fetch);
rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
p2mt, p2m_access_rwx);
ASSERT(rc == 0);
@@ -1403,11 +1403,11 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
/* Send request to mem event */
req->gfn = gfn;
req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
- req->gla_valid = gla_valid;
+ req->gla_valid = check.gla_valid;
req->gla = gla;
- req->access_r = access_r;
- req->access_w = access_w;
- req->access_x = access_x;
+ req->access_r = check.read_access;
+ req->access_w = check.write_access;
+ req->access_x = check.insn_fetch;
req->vcpu_id = v->vcpu_id;
}
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0ebd478..6f16eac 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -455,11 +455,20 @@ static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
#endif
}
-int hvm_hap_nested_page_fault(paddr_t gpa,
- bool_t gla_valid, unsigned long gla,
- bool_t access_r,
- bool_t access_w,
- bool_t access_x);
+struct npf_info {
+ uint8_t read_access:1;
+ uint8_t write_access:1;
+ uint8_t insn_fetch:1;
+ uint8_t gla_valid:1;
+ uint8_t have_extra_fault_info:1;
+ uint8_t extra_fault_info:1;
+};
+
+#define NPFEC_fault_in_gpt 0U
+#define NPFEC_fault_with_gla 1U
+
+int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
+ struct npf_info npfec);
#define hvm_msr_tsc_aux(v) ({ \
struct domain *__d = (v)->domain; \
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 0ddbadb..5681cf3 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -101,6 +101,20 @@ typedef enum {
/* NOTE: Assumed to be only 4 bits right now */
} p2m_access_t;
+/*
+ * Information used to perform mem access checks.
+ */
+struct p2m_access_check_info {
+ uint8_t read_access:1;
+ uint8_t write_access:1;
+ uint8_t insn_fetch:1;
+ uint8_t gla_valid:1;
+ uint8_t have_extra_fault_info:1;
+ uint8_t extra_fault_info:1;
+};
+#define P2M_FAULT_IN_GPT 0u
+#define P2M_FAULT_WITH_GLA 1u
+
/* Modifiers to the query */
typedef unsigned int p2m_query_t;
#define P2M_ALLOC (1u<<0) /* Populate PoD and paged-out entries */
@@ -597,9 +611,9 @@ void p2m_mem_paging_resume(struct domain *d);
* been promoted with no underlying vcpu pause. If the req_ptr has been populated,
* then the caller must put the event in the ring (once having released get_gfn*
* locks -- caller must also xfree the request. */
-bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
- bool_t access_r, bool_t access_w, bool_t access_x,
- mem_event_request_t **req_ptr);
+bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+ struct p2m_access_check_info check,
+ mem_event_request_t **req_ptr);
/* Resumes the running of the VCPU, restarting the last instruction */
void p2m_mem_access_resume(struct domain *d);
--
2.0.1