|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 18/25] arm/altp2m: Add HVMOP_altp2m_set_mem_access.
The HVMOP HVMOP_altp2m_set_mem_access allows setting gfn permissions
(currently one page at a time) of a specific altp2m view. In case the
view does not hold the requested gfn entry, it will be first copied from
the hostp2m table and then modified as requested.
Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
---
v2: Prevent the page reference count from being falsely updated on
altp2m modification. Therefore, we add a check determining whether
the target p2m is a hostp2m before p2m_put_l3_page is called.
---
xen/arch/arm/altp2m.c | 68 +++++++++++++++++++++++++++++++++++++
xen/arch/arm/hvm.c | 7 +++-
xen/arch/arm/p2m.c | 81 +++++++++++++++++++++++++++++++++++++-------
xen/include/asm-arm/altp2m.h | 10 ++++++
xen/include/asm-arm/p2m.h | 21 ++++++++++++
5 files changed, 173 insertions(+), 14 deletions(-)
diff --git a/xen/arch/arm/altp2m.c b/xen/arch/arm/altp2m.c
index 7404f42..f98fd73 100644
--- a/xen/arch/arm/altp2m.c
+++ b/xen/arch/arm/altp2m.c
@@ -65,6 +65,74 @@ int altp2m_switch_domain_altp2m_by_id(struct domain *d,
unsigned int idx)
return rc;
}
+int altp2m_set_mem_access(struct domain *d,
+ struct p2m_domain *hp2m,
+ struct p2m_domain *ap2m,
+ p2m_access_t a,
+ gfn_t gfn)
+{
+ p2m_type_t p2mt;
+ xenmem_access_t xma_old;
+ paddr_t gpa = pfn_to_paddr(gfn_x(gfn));
+ mfn_t mfn;
+ unsigned int level;
+ int rc;
+
+ static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+ ACCESS(n),
+ ACCESS(r),
+ ACCESS(w),
+ ACCESS(rw),
+ ACCESS(x),
+ ACCESS(rx),
+ ACCESS(wx),
+ ACCESS(rwx),
+ ACCESS(rx2rw),
+ ACCESS(n2rwx),
+#undef ACCESS
+ };
+
+ altp2m_lock(d);
+
+ /* Check if entry is part of the altp2m view. */
+ mfn = p2m_lookup_attr(ap2m, gfn, &p2mt, &level, NULL, NULL);
+
+ /* Check host p2m if no valid entry in ap2m. */
+ if ( mfn_eq(mfn, INVALID_MFN) )
+ {
+ /* Check if entry is part of the host p2m view. */
+ mfn = p2m_lookup_attr(hp2m, gfn, &p2mt, &level, NULL, &xma_old);
+ if ( mfn_eq(mfn, INVALID_MFN) || p2mt != p2m_ram_rw )
+ {
+ rc = -ESRCH;
+ goto out;
+ }
+
+ /* If this is a superpage, copy that first. */
+ if ( level != 3 )
+ {
+ rc = modify_altp2m_entry(d, ap2m, gpa, pfn_to_paddr(mfn_x(mfn)),
+ level, p2mt, memaccess[xma_old]);
+ if ( rc < 0 )
+ {
+ rc = -ESRCH;
+ goto out;
+ }
+ }
+ }
+
+ /* Set mem access attributes - currently supporting only one (4K) page. */
+ level = 3;
+ rc = modify_altp2m_entry(d, ap2m, gpa, pfn_to_paddr(mfn_x(mfn)),
+ level, p2mt, a);
+
+out:
+ altp2m_unlock(d);
+
+ return rc;
+}
+
static void altp2m_vcpu_reset(struct vcpu *v)
{
struct altp2mvcpu *av = &vcpu_altp2m(v);
diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
index 3b508df..00a244a 100644
--- a/xen/arch/arm/hvm.c
+++ b/xen/arch/arm/hvm.c
@@ -133,7 +133,12 @@ static int do_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg)
break;
case HVMOP_altp2m_set_mem_access:
- rc = -EOPNOTSUPP;
+ if ( a.u.set_mem_access.pad )
+ rc = -EINVAL;
+ else
+ rc = p2m_set_mem_access(d, _gfn(a.u.set_mem_access.gfn), 1, 0, 0,
+ a.u.set_mem_access.hvmmem_access,
+ a.u.set_mem_access.view);
break;
case HVMOP_altp2m_change_gfn:
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index d4b7c92..e0a7f38 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -288,6 +288,19 @@ mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t)
return ret;
}
+mfn_t p2m_lookup_attr(struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t,
+ unsigned int *level, unsigned int *mattr,
+ xenmem_access_t *xma)
+{
+ mfn_t ret;
+
+ p2m_read_lock(p2m);
+ ret = __p2m_lookup(p2m, gfn, t, level, mattr, xma);
+ p2m_read_unlock(p2m);
+
+ return ret;
+}
+
int guest_physmap_mark_populate_on_demand(struct domain *d,
unsigned long gfn,
unsigned int order)
@@ -760,7 +773,7 @@ static int apply_one_level(struct domain *d,
* of the p2m tree which we would be about to lop off.
*/
BUG_ON(level < 3 && p2m_table(orig_pte));
- if ( level == 3 )
+ if ( level == 3 && p2m_is_hostp2m(p2m) )
p2m_put_l3_page(orig_pte);
}
else /* New mapping */
@@ -859,7 +872,7 @@ static int apply_one_level(struct domain *d,
p2m->stats.mappings[level]--;
- if ( level == 3 )
+ if ( level == 3 && p2m_is_hostp2m(p2m) )
p2m_put_l3_page(orig_pte);
/*
@@ -1303,6 +1316,21 @@ void guest_physmap_remove_page(struct domain *d,
p2m_remove_mapping(d, p2m_get_hostp2m(d), gfn, (1 << page_order), mfn);
}
+int modify_altp2m_entry(struct domain *d, struct p2m_domain *ap2m,
+ paddr_t gpa, paddr_t maddr, unsigned int level,
+ p2m_type_t t, p2m_access_t a)
+{
+ paddr_t size = level_sizes[level];
+ paddr_t mask = level_masks[level];
+ gfn_t gfn = _gfn(paddr_to_pfn(gpa & mask));
+ mfn_t mfn = _mfn(paddr_to_pfn(maddr & mask));
+ unsigned long nr = paddr_to_pfn(size);
+
+ ASSERT(p2m_is_altp2m(ap2m));
+
+ return apply_p2m_changes(d, ap2m, INSERT, gfn, nr, mfn, 0, t, a);
+}
+
int p2m_alloc_table(struct p2m_domain *p2m)
{
unsigned int i;
@@ -1920,7 +1948,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
uint32_t start, uint32_t mask, xenmem_access_t access,
unsigned int altp2m_idx)
{
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ struct p2m_domain *hp2m = p2m_get_hostp2m(d), *ap2m = NULL;
p2m_access_t a;
long rc = 0;
@@ -1939,33 +1967,60 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
#undef ACCESS
};
+ /* altp2m view 0 is treated as the hostp2m */
+ if ( altp2m_idx )
+ {
+ if ( altp2m_idx >= MAX_ALTP2M ||
+ d->arch.altp2m_vttbr[altp2m_idx] == INVALID_VTTBR )
+ return -EINVAL;
+
+ ap2m = d->arch.altp2m_p2m[altp2m_idx];
+ }
+
switch ( access )
{
case 0 ... ARRAY_SIZE(memaccess) - 1:
a = memaccess[access];
break;
case XENMEM_access_default:
- a = p2m->default_access;
+ a = hp2m->default_access;
break;
default:
return -EINVAL;
}
- /*
- * Flip mem_access_enabled to true when a permission is set, as to prevent
- * allocating or inserting super-pages.
- */
- p2m->mem_access_enabled = true;
-
/* If request to set default access. */
if ( gfn_eq(gfn, INVALID_GFN) )
{
- p2m->default_access = a;
+ hp2m->default_access = a;
return 0;
}
- rc = apply_p2m_changes(d, p2m, MEMACCESS, gfn_add(gfn, start),
- (nr - start), INVALID_MFN, mask, 0, a);
+ if ( ap2m )
+ {
+ /*
+ * Flip mem_access_enabled to true when a permission is set, as to prevent
+ * allocating or inserting super-pages.
+ */
+ ap2m->mem_access_enabled = true;
+
+ /*
+ * ARM altp2m currently supports only setting of memory access rights
+ * of only one (4K) page at a time.
+ */
+ rc = altp2m_set_mem_access(d, hp2m, ap2m, a, gfn);
+ }
+ else
+ {
+ /*
+ * Flip mem_access_enabled to true when a permission is set, as to prevent
+ * allocating or inserting super-pages.
+ */
+ hp2m->mem_access_enabled = true;
+
+ rc = apply_p2m_changes(d, hp2m, MEMACCESS, gfn_add(gfn, start),
+ (nr - start), INVALID_MFN, mask, 0, a);
+ }
if ( rc < 0 )
return rc;
else if ( rc > 0 )
diff --git a/xen/include/asm-arm/altp2m.h b/xen/include/asm-arm/altp2m.h
index a6496b7..dc41f93 100644
--- a/xen/include/asm-arm/altp2m.h
+++ b/xen/include/asm-arm/altp2m.h
@@ -71,4 +71,14 @@ void altp2m_flush(struct domain *d);
int altp2m_destroy_by_id(struct domain *d,
unsigned int idx);
+/* Set memory access attributes of the gfn in the altp2m view. If the altp2m
+ * view does not contain the particular entry, copy it first from the hostp2m.
+ *
+ * Currently supports memory attribute adoptions of only one (4K) page. */
+int altp2m_set_mem_access(struct domain *d,
+ struct p2m_domain *hp2m,
+ struct p2m_domain *ap2m,
+ p2m_access_t a,
+ gfn_t gfn);
+
#endif /* __ASM_ARM_ALTP2M_H */
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 32326cb..9859ad1 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -180,6 +180,17 @@ void p2m_dump_info(struct domain *d);
/* Look up the MFN corresponding to a domain's GFN. */
mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t);
+/* Lookup the MFN, memory attributes, and page table level corresponding to a
+ * domain's GFN. */
+mfn_t p2m_lookup_attr(struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t,
+ unsigned int *level, unsigned int *mattr,
+ xenmem_access_t *xma);
+
+/* Modify an altp2m view's entry or its attributes. */
+int modify_altp2m_entry(struct domain *d, struct p2m_domain *p2m,
+ paddr_t gpa, paddr_t maddr, unsigned int level,
+ p2m_type_t t, p2m_access_t a);
+
/* Clean & invalidate caches corresponding to a region of guest address space */
int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr);
@@ -303,6 +314,16 @@ static inline int get_page_and_type(struct page_info *page,
/* get host p2m table */
#define p2m_get_hostp2m(d) (&(d)->arch.p2m)
+static inline bool_t p2m_is_hostp2m(const struct p2m_domain *p2m)
+{
+ return p2m->p2m_class == p2m_host;
+}
+
+static inline bool_t p2m_is_altp2m(const struct p2m_domain *p2m)
+{
+ return p2m->p2m_class == p2m_alternate;
+}
+
/* vm_event and mem_access are supported on any ARM guest */
static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
{
--
2.9.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our