|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 08/25] arm/altp2m: Add HVMOP_altp2m_set_domain_state.
The HVMOP_altp2m_set_domain_state operation allows activating altp2m on a
specific domain. This commit adopts the x86
HVMOP_altp2m_set_domain_state implementation. Note that the function
altp2m_flush is currently implemented in the form of a stub.
Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
---
v2: Dynamically allocate memory for altp2m views only when needed.
Move altp2m related helpers to altp2m.c.
p2m_flush_tlb is made publicly accessible.
---
xen/arch/arm/altp2m.c | 116 +++++++++++++++++++++++++++++++++++++++++
xen/arch/arm/hvm.c | 34 +++++++++++-
xen/arch/arm/p2m.c | 2 +-
xen/include/asm-arm/altp2m.h | 15 ++++++
xen/include/asm-arm/domain.h | 9 ++++
xen/include/asm-arm/flushtlb.h | 4 ++
xen/include/asm-arm/p2m.h | 11 ++++
7 files changed, 189 insertions(+), 2 deletions(-)
diff --git a/xen/arch/arm/altp2m.c b/xen/arch/arm/altp2m.c
index abbd39a..767f233 100644
--- a/xen/arch/arm/altp2m.c
+++ b/xen/arch/arm/altp2m.c
@@ -19,6 +19,122 @@
#include <asm/p2m.h>
#include <asm/altp2m.h>
+#include <asm/flushtlb.h>
+
+/*
+ * Return the altp2m view the given vCPU is currently assigned to, or NULL
+ * if the vCPU is not on any altp2m view (p2midx == INVALID_ALTP2M).
+ */
+struct p2m_domain *altp2m_get_altp2m(struct vcpu *v)
+{
+ unsigned int index = vcpu_altp2m(v).p2midx;
+
+ if ( index == INVALID_ALTP2M )
+ return NULL;
+
+ /* A valid index must always be within the bounds of the view array. */
+ BUG_ON(index >= MAX_ALTP2M);
+
+ return v->domain->arch.altp2m_p2m[index];
+}
+
+/* Detach the vCPU from any altp2m view by invalidating its view index. */
+static void altp2m_vcpu_reset(struct vcpu *v)
+{
+ struct altp2mvcpu *av = &vcpu_altp2m(v);
+
+ av->p2midx = INVALID_ALTP2M;
+}
+
+/*
+ * Place the vCPU on altp2m view 0 and account it in that view's
+ * active_vcpus count. A remote vCPU is paused around the update.
+ *
+ * NOTE(review): assumes altp2m[0] has already been initialised (the
+ * HVMOP_altp2m_set_domain_state handler calls altp2m_init_by_id(d, 0)
+ * first) — altp2m_get_altp2m(v) would otherwise dereference a NULL
+ * view here. Verify all callers uphold this.
+ */
+void altp2m_vcpu_initialise(struct vcpu *v)
+{
+ if ( v != current )
+ vcpu_pause(v);
+
+ altp2m_vcpu_reset(v);
+ vcpu_altp2m(v).p2midx = 0;
+ atomic_inc(&altp2m_get_altp2m(v)->active_vcpus);
+
+ if ( v != current )
+ vcpu_unpause(v);
+}
+
+/*
+ * Remove the vCPU from its current altp2m view (if any), dropping the
+ * view's active_vcpus count and invalidating the vCPU's view index.
+ * A remote vCPU is paused around the update.
+ */
+void altp2m_vcpu_destroy(struct vcpu *v)
+{
+ struct p2m_domain *p2m;
+
+ if ( v != current )
+ vcpu_pause(v);
+
+ if ( (p2m = altp2m_get_altp2m(v)) )
+ atomic_dec(&p2m->active_vcpus);
+
+ altp2m_vcpu_reset(v);
+
+ if ( v != current )
+ vcpu_unpause(v);
+}
+
+/*
+ * Allocate (if necessary) and initialise the altp2m view at index @idx:
+ * set up the p2m structure, allocate its root table, mark it as an
+ * alternate p2m, and publish it in d->arch.altp2m_p2m/altp2m_vttbr.
+ *
+ * Returns 0 on success, -ENOMEM or the p2m_init_one/p2m_alloc_table
+ * error code on failure.
+ */
+static int altp2m_init_helper(struct domain *d, unsigned int idx)
+{
+ int rc;
+ struct p2m_domain *p2m = d->arch.altp2m_p2m[idx];
+
+ if ( p2m == NULL )
+ {
+ /* Allocate a new, zeroed altp2m view. */
+ p2m = xzalloc(struct p2m_domain);
+ if ( p2m == NULL)
+ {
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+
+ /* Initialize the new altp2m view. */
+ rc = p2m_init_one(d, p2m);
+ if ( rc )
+ goto err;
+
+ /* Allocate a root table for the altp2m view. */
+ rc = p2m_alloc_table(p2m);
+ if ( rc )
+ goto err;
+
+ p2m->p2m_class = p2m_alternate;
+ p2m->access_required = 1;
+ _atomic_set(&p2m->active_vcpus, 0);
+
+ d->arch.altp2m_p2m[idx] = p2m;
+ d->arch.altp2m_vttbr[idx] = p2m->vttbr.vttbr;
+
+ /*
+ * Make sure that all TLBs corresponding to the current VMID are flushed
+ * before using it.
+ */
+ p2m_flush_tlb(p2m);
+
+ return rc;
+
+ /*
+ * NOTE(review): this error path xfree()s p2m even when it came from
+ * d->arch.altp2m_p2m[idx] rather than the xzalloc above, and a failure
+ * of p2m_alloc_table may leave resources acquired by p2m_init_one
+ * unreleased (p2m_free_one looks like the matching teardown — confirm).
+ */
+err:
+ if ( p2m )
+ xfree(p2m);
+
+ d->arch.altp2m_p2m[idx] = NULL;
+
+ return rc;
+}
+
+/*
+ * Initialise the altp2m view at index @idx under the domain's altp2m
+ * lock, but only if that slot is not already in use (its VTTBR is
+ * still INVALID_VTTBR).
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range or already-valid
+ * index, or the altp2m_init_helper error code.
+ */
+int altp2m_init_by_id(struct domain *d, unsigned int idx)
+{
+ int rc = -EINVAL;
+
+ if ( idx >= MAX_ALTP2M )
+ return rc;
+
+ altp2m_lock(d);
+
+ if ( d->arch.altp2m_vttbr[idx] == INVALID_VTTBR )
+ rc = altp2m_init_helper(d, idx);
+
+ altp2m_unlock(d);
+
+ return rc;
+}
int altp2m_init(struct domain *d)
{
diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
index 01a3243..78370c6 100644
--- a/xen/arch/arm/hvm.c
+++ b/xen/arch/arm/hvm.c
@@ -80,8 +80,40 @@ static int do_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg)
break;
case HVMOP_altp2m_set_domain_state:
- rc = -EOPNOTSUPP;
+ {
+ struct vcpu *v;
+ bool_t ostate;
+
+ /* altp2m must have been enabled for the domain at creation time. */
+ if ( !altp2m_enabled(d) )
+ {
+ rc = -EINVAL;
+ break;
+ }
+
+ /* Record the old state and latch the requested one. */
+ ostate = d->arch.altp2m_active;
+ d->arch.altp2m_active = !!a.u.domain_state.state;
+
+ /*
+ * If the alternate p2m state has changed, handle appropriately.
+ *
+ * NOTE(review): when activation is requested and
+ * altp2m_init_by_id(d, 0) fails, altp2m_active is left set to the
+ * new (active) state even though no view was set up — consider
+ * restoring ostate on failure. Also, x86 appears to pause the
+ * domain around this transition; confirm whether ARM needs the
+ * same.
+ */
+ if ( (d->arch.altp2m_active != ostate) &&
+ (ostate || !(rc = altp2m_init_by_id(d, 0))) )
+ {
+ for_each_vcpu( d, v )
+ {
+ if ( !ostate )
+ altp2m_vcpu_initialise(v);
+ else
+ altp2m_vcpu_destroy(v);
+ }
+
+ /*
+ * The altp2m_active state has been deactivated. It is now safe to
+ * flush all altp2m views -- including altp2m[0].
+ */
+ if ( ostate )
+ altp2m_flush(d);
+ }
break;
+ }
case HVMOP_altp2m_vcpu_enable_notify:
rc = -EOPNOTSUPP;
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 29ec5e5..8afea11 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -139,7 +139,7 @@ void p2m_restore_state(struct vcpu *n)
isb();
}
-static void p2m_flush_tlb(struct p2m_domain *p2m)
+void p2m_flush_tlb(struct p2m_domain *p2m)
{
unsigned long flags = 0;
uint64_t ovttbr;
diff --git a/xen/include/asm-arm/altp2m.h b/xen/include/asm-arm/altp2m.h
index 79ea66b..a33c740 100644
--- a/xen/include/asm-arm/altp2m.h
+++ b/xen/include/asm-arm/altp2m.h
@@ -22,6 +22,8 @@
#include <xen/sched.h>
+#define INVALID_ALTP2M 0xffff
+
#define altp2m_lock(d) spin_lock(&(d)->arch.altp2m_lock)
#define altp2m_unlock(d) spin_unlock(&(d)->arch.altp2m_lock)
@@ -44,4 +46,17 @@ static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
int altp2m_init(struct domain *d);
void altp2m_teardown(struct domain *d);
+void altp2m_vcpu_initialise(struct vcpu *v);
+void altp2m_vcpu_destroy(struct vcpu *v);
+
+/* Make a specific alternate p2m valid. */
+int altp2m_init_by_id(struct domain *d,
+ unsigned int idx);
+
+/*
+ * Flush all the alternate p2m's for a domain.
+ *
+ * TODO: stub only — per the commit message the real implementation is
+ * deferred to a later patch. Callers (e.g. the set_domain_state handler)
+ * currently rely on it being a safe no-op.
+ */
+static inline void altp2m_flush(struct domain *d)
+{
+ /* Not yet implemented. */
+}
+
#endif /* __ASM_ARM_ALTP2M_H */
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 3c25ea5..63a9650 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -135,6 +135,12 @@ struct arch_domain
spinlock_t altp2m_lock;
} __cacheline_aligned;
+/* Per-vCPU altp2m state. */
+struct altp2mvcpu {
+ uint16_t p2midx; /* alternate p2m index; INVALID_ALTP2M if unassigned */
+};
+
+/* Accessor for a vCPU's altp2m state. */
+#define vcpu_altp2m(v) ((v)->arch.avcpu)
+
struct arch_vcpu
{
struct {
@@ -264,6 +270,9 @@ struct arch_vcpu
struct vtimer phys_timer;
struct vtimer virt_timer;
bool_t vtimer_initialized;
+
+ /* Alternate p2m context */
+ struct altp2mvcpu avcpu;
} __cacheline_aligned;
void vcpu_show_execution_state(struct vcpu *);
diff --git a/xen/include/asm-arm/flushtlb.h b/xen/include/asm-arm/flushtlb.h
index 329fbb4..57c3c34 100644
--- a/xen/include/asm-arm/flushtlb.h
+++ b/xen/include/asm-arm/flushtlb.h
@@ -2,6 +2,7 @@
#define __ASM_ARM_FLUSHTLB_H__
#include <xen/cpumask.h>
+#include <asm/p2m.h>
/*
* Filter the given set of CPUs, removing those that definitely flushed their
@@ -25,6 +26,9 @@ do {
\
/* Flush specified CPUs' TLBs */
void flush_tlb_mask(const cpumask_t *mask);
+/* Flush CPU's TLBs for the specified domain */
+void p2m_flush_tlb(struct p2m_domain *p2m);
+
#endif /* __ASM_ARM_FLUSHTLB_H__ */
/*
* Local variables:
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 24a1f61..f13f285 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -9,6 +9,8 @@
#include <xen/p2m-common.h>
#include <public/memory.h>
+#include <asm/atomic.h>
+
#define MAX_ALTP2M 10 /* ARM might contain an arbitrary number of
altp2m views. */
#define paddr_bits PADDR_BITS
@@ -86,6 +88,9 @@ struct p2m_domain {
*/
struct radix_tree_root mem_access_settings;
+ /* Alternate p2m: count of vcpu's currently using this p2m. */
+ atomic_t active_vcpus;
+
/* Choose between: host/alternate */
p2m_class_t p2m_class;
@@ -214,6 +219,12 @@ void guest_physmap_remove_page(struct domain *d,
mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
+/* Allocates page table for a p2m. */
+int p2m_alloc_table(struct p2m_domain *p2m);
+
+/* Initialize the p2m structure. */
+int p2m_init_one(struct domain *d, struct p2m_domain *p2m);
+
/* Release resources held by the p2m structure. */
void p2m_free_one(struct p2m_domain *p2m);
--
2.9.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |