[Xen-devel] [PATCH v4 26/39] arm/p2m: Add HVMOP_altp2m_set_mem_access
The HVMOP_altp2m_set_mem_access op allows setting the access permissions of
a gfn (currently one page at a time) in a specific altp2m view. If the
view does not hold the requested gfn entry, the entry is first copied from
the host's p2m table and then modified as requested.
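
For illustration, restricting a single page could be driven from the
toolstack roughly as follows (a minimal sketch only; the xc_altp2m_*
prototypes are assumed to match tools/libxc/include/xenctrl.h of this
series' era, and restrict_gfn() is a hypothetical helper):

#include <stdbool.h>
#include <xenctrl.h>

/* Hypothetical helper: make one 4K guest page execute-only in a new view. */
static int restrict_gfn(xc_interface *xch, uint32_t domid, xen_pfn_t gfn)
{
    uint16_t view_id;
    int rc;

    /* altp2m must be enabled on the domain before views can be created. */
    rc = xc_altp2m_set_domain_state(xch, domid, true);
    if ( rc )
        return rc;

    /* Create a view; pages not touched below keep the default rwx access. */
    rc = xc_altp2m_create_view(xch, domid, XENMEM_access_rwx, &view_id);
    if ( rc )
        return rc;

    /* Issues HVMOP_altp2m_set_mem_access for a single gfn in that view. */
    rc = xc_altp2m_set_mem_access(xch, domid, view_id, gfn, XENMEM_access_x);
    if ( rc )
        return rc;

    /* Activate the restricted view. */
    return xc_altp2m_switch_to_view(xch, domid, view_id);
}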
Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Cc: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
---
v2: Prevented the page reference count from being falsely updated on
altp2m modification by adding a check determining whether the target
p2m is a hostp2m before p2m_put_l3_page is called.
v3: Cosmetic fixes.
Added the functionality to set/get the default_access also in/from
the requested altp2m view.
Read-locked hp2m in "altp2m_set_mem_access".
Moved the functions "p2m_is_(hostp2m|altp2m)" out of this commit.
Moved the function "modify_altp2m_entry" out of this commit.
Moved the function "p2m_lookup_attr" out of this commit.
Moved guards for "p2m_put_l3_page" out of this commit.
v4: Cosmetic fixes.
Removed locking of altp2m_lock, as it unnecessarily serialized accesses
to "altp2m_set_mem_access".
Used the functions "p2m_(set|get)_entry" instead of the helpers
"p2m_lookup_attr" and "modify_altp2m_entry".
Removed the restriction that limited memory access changes to
p2m_ram_(rw|ro) pages. Instead, we allow setting the memory permissions of
all pages of the particular altp2m view.
Moved the functionality locking ap2m and hp2m out of "altp2m_set_mem_access"
into "p2m_set_mem_access" (see the lock-ordering sketch after this list).
Commented on the need for the default access in altp2m views.
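
As an illustration of the resulting lock discipline (a sketch only; the
helper names below are made up, the real code lives in "p2m_set_mem_access"
in the diff): the host p2m is always write-locked before the altp2m view
and released in reverse order.

/* Hypothetical helpers mirroring the locking order used below. */
static void p2m_lock_pair(struct p2m_domain *hp2m, struct p2m_domain *ap2m)
{
    p2m_write_lock(hp2m);        /* first: host p2m */
    if ( ap2m )
        p2m_write_lock(ap2m);    /* second: altp2m view */
}

static void p2m_unlock_pair(struct p2m_domain *hp2m, struct p2m_domain *ap2m)
{
    if ( ap2m )
        p2m_write_unlock(ap2m);  /* release in reverse order */
    p2m_write_unlock(hp2m);
}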
---
xen/arch/arm/altp2m.c | 46 ++++++++++++++++++++++++++++
xen/arch/arm/hvm.c | 7 ++++-
xen/arch/arm/mem_access.c | 72 +++++++++++++++++++++++++++++++++++---------
xen/include/asm-arm/altp2m.h | 12 ++++++++
4 files changed, 122 insertions(+), 15 deletions(-)
diff --git a/xen/arch/arm/altp2m.c b/xen/arch/arm/altp2m.c
index 9a2cf5a018..8c3212780a 100644
--- a/xen/arch/arm/altp2m.c
+++ b/xen/arch/arm/altp2m.c
@@ -77,6 +77,52 @@ int altp2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
return rc;
}
+int altp2m_set_mem_access(struct domain *d,
+ struct p2m_domain *hp2m,
+ struct p2m_domain *ap2m,
+ p2m_access_t a,
+ gfn_t gfn)
+{
+ p2m_type_t p2mt;
+ p2m_access_t old_a;
+ mfn_t mfn, mfn_sp;
+ gfn_t gfn_sp;
+ unsigned int order;
+ int rc;
+
+ /* Check if entry is part of the altp2m view. */
+ mfn = p2m_get_entry(ap2m, gfn, &p2mt, NULL, &order);
+
+ /* Check host p2m if no valid entry in ap2m. */
+ if ( mfn_eq(mfn, INVALID_MFN) )
+ {
+ /* Check if entry is part of the host p2m view. */
+ mfn = p2m_get_entry(hp2m, gfn, &p2mt, &old_a, &order);
+ if ( mfn_eq(mfn, INVALID_MFN) )
+ return -ESRCH;
+
+ /* If this is a superpage, copy that first. */
+ if ( order != THIRD_ORDER )
+ {
+ /* Align the gfn and mfn to the given page order. */
+ gfn_sp = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));
+ mfn_sp = _mfn(mfn_x(mfn) & ~((1UL << order) - 1));
+
+ rc = p2m_set_entry(ap2m, gfn_sp, (1UL << order), mfn_sp, p2mt, old_a);
+ if ( rc )
+ return rc;
+ }
+ }
+
+ /* Align the gfn and mfn to the 4K page (third level) order. */
+ gfn = _gfn(gfn_x(gfn) & ~((1UL << THIRD_ORDER) - 1));
+ mfn = _mfn(mfn_x(mfn) & ~((1UL << THIRD_ORDER) - 1));
+
+ rc = p2m_set_entry(ap2m, gfn, (1UL << THIRD_ORDER), mfn, p2mt, a);
+
+ return rc;
+}
+
static void altp2m_vcpu_reset(struct vcpu *v)
{
v->arch.ap2m_idx = INVALID_ALTP2M;
diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
index 9bddc7e17e..7e91f2436d 100644
--- a/xen/arch/arm/hvm.c
+++ b/xen/arch/arm/hvm.c
@@ -139,7 +139,12 @@ static int do_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg)
break;
case HVMOP_altp2m_set_mem_access:
- rc = -EOPNOTSUPP;
+ if ( a.u.set_mem_access.pad )
+ rc = -EINVAL;
+ else
+ rc = p2m_set_mem_access(d, _gfn(a.u.set_mem_access.gfn), 1, 0, 0,
+ a.u.set_mem_access.hvmmem_access,
+ a.u.set_mem_access.view);
break;
case HVMOP_altp2m_change_gfn:
diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
index ebc3a86af3..ee2a43fc6e 100644
--- a/xen/arch/arm/mem_access.c
+++ b/xen/arch/arm/mem_access.c
@@ -374,7 +374,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
uint32_t start, uint32_t mask, xenmem_access_t access,
unsigned int altp2m_idx)
{
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ struct p2m_domain *hp2m = p2m_get_hostp2m(d), *ap2m = NULL;
p2m_access_t a;
unsigned int order;
long rc = 0;
@@ -394,13 +394,26 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
#undef ACCESS
};
+ /* altp2m view 0 is treated as the hostp2m */
+ if ( altp2m_idx )
+ {
+ if ( altp2m_idx >= MAX_ALTP2M ||
+ d->arch.altp2m_p2m[altp2m_idx] == NULL )
+ return -EINVAL;
+
+ ap2m = d->arch.altp2m_p2m[altp2m_idx];
+ }
+
switch ( access )
{
case 0 ... ARRAY_SIZE(memaccess) - 1:
a = memaccess[access];
break;
case XENMEM_access_default:
- a = p2m->default_access;
+ if ( ap2m )
+ a = ap2m->default_access;
+ else
+ a = hp2m->default_access;
break;
default:
return -EINVAL;
@@ -410,31 +423,60 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
* Flip mem_access_enabled to true when a permission is set, as to prevent
* allocating or inserting super-pages.
*/
- p2m->mem_access_enabled = true;
+ if ( ap2m )
+ ap2m->mem_access_enabled = true;
+ else
+ hp2m->mem_access_enabled = true;
/* If request to set default access. */
if ( gfn_eq(gfn, INVALID_GFN) )
{
- p2m->default_access = a;
+ if ( ap2m )
+ /*
+ * XXX: Currently, we allow setting the default access of individual
+ * altp2m views. The default access is required, e.g., when
+ * splitting a superpage belonging to an altp2m view. By setting
+ * the default access, we can limit the access to the split pages
+ * without explicitly accessing them.
+ */
+ ap2m->default_access = a;
+ else
+ hp2m->default_access = a;
+
return 0;
}
- p2m_write_lock(p2m);
+ p2m_write_lock(hp2m);
+ if ( ap2m )
+ p2m_write_lock(ap2m);
for ( gfn = gfn_add(gfn, start); nr > start;
gfn = gfn_next_boundary(gfn, order) )
{
- p2m_type_t t;
- mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order);
-
-
- if ( !mfn_eq(mfn, INVALID_MFN) )
+ if ( ap2m )
{
- order = 0;
- rc = p2m_set_entry(p2m, gfn, 1, mfn, t, a);
- if ( rc )
+ /*
+ * TODO: ARM altp2m currently supports setting the memory
+ * access rights of only one (4K) page at a time.
+ */
+
+ rc = altp2m_set_mem_access(d, hp2m, ap2m, a, gfn);
+ if ( rc && rc != -ESRCH )
break;
}
+ else
+ {
+ p2m_type_t t;
+ mfn_t mfn = p2m_get_entry(hp2m, gfn, &t, NULL, &order);
+
+ if ( !mfn_eq(mfn, INVALID_MFN) )
+ {
+ order = 0;
+ rc = p2m_set_entry(hp2m, gfn, 1, mfn, t, a);
+ if ( rc )
+ break;
+ }
+ }
start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);
/* Check for continuation if it is not the last iteration */
@@ -445,7 +487,9 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
}
}
- p2m_write_unlock(p2m);
+ if ( ap2m )
+ p2m_write_unlock(ap2m);
+ p2m_write_unlock(hp2m);
return rc;
}
diff --git a/xen/include/asm-arm/altp2m.h b/xen/include/asm-arm/altp2m.h
index d59f704489..f8e772f120 100644
--- a/xen/include/asm-arm/altp2m.h
+++ b/xen/include/asm-arm/altp2m.h
@@ -68,4 +68,16 @@ void altp2m_flush_complete(struct domain *d);
int altp2m_destroy_by_id(struct domain *d,
unsigned int idx);
+/*
+ * Set memory access attributes of the gfn in the altp2m view. If the altp2m
+ * view does not contain the particular entry, copy it first from the hostp2m.
+ *
+ * Currently supports setting the memory access attributes of only one (4K) page.
+ */
+int altp2m_set_mem_access(struct domain *d,
+ struct p2m_domain *hp2m,
+ struct p2m_domain *ap2m,
+ p2m_access_t a,
+ gfn_t gfn);
+
#endif /* __ASM_ARM_ALTP2M_H */
--
2.13.3