[Xen-devel] [PATCH 10/18] arm/altp2m: Renamed and extended p2m_alloc_table.
The function formerly named "p2m_alloc_table" solely allocated the
pages required for a p2m. The new implementation keeps the p2m
allocation logic inside this function (which is made static) and
provides a wrapper function "p2m_table_init" that can be called
externally to initialize p2m tables in general. In this way, it
distinguishes between the domain's p2m and its altp2m mappings, which
are allocated in the same manner.
NOTE: The function "p2m_alloc_table" no longer takes the p2m lock.
Also, the TLBs are now flushed outside of "p2m_alloc_table". Instead,
the associated locking and TLB flushing are performed as part of the
function "p2m_table_init". This allows us to provide a uniform
interface for p2m-related table allocation, which can be used for
altp2m (and potentially for nested p2m tables in a future
implementation), as is done in the x86 implementation.
Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
---
 xen/arch/arm/domain.c     |  2 +-
 xen/arch/arm/p2m.c        | 57 ++++++++++++++++++++++++++++++++++++++++++++++-----------
 xen/include/asm-arm/p2m.h |  2 +-
 3 files changed, 48 insertions(+), 13 deletions(-)
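
For reviewers: the p2m->vttbr accesses in the p2m.c hunk below rely on
the struct vttbr wrapper introduced earlier in this series. The
following is only a sketch of the layout implied by the field names,
assuming the ARMv8 VTTBR_EL2 format (BADDR in bits [47:0], VMID in bits
[55:48], upper bits reserved); it is not the actual definition:

    #include <stdint.h>

    /* Sketch of the assumed VTTBR_EL2 layout backing p2m->vttbr. */
    struct vttbr {
        union {
            struct {
                uint64_t vttbr_baddr : 48; /* stage-2 table base address */
                uint64_t vttbr_vmid  : 8;  /* VMID, VTTBR_EL2[55:48] */
                uint64_t res0        : 8;  /* reserved, write as zero */
            };
            uint64_t vttbr;                /* raw 64-bit register value */
        };
    };

With such a layout, p2m->vttbr.vttbr yields the complete register value
that p2m_table_init copies into d->arch.vttbr, replacing the manual
shift/mask composition removed from p2m_alloc_table.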
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 6ce4645..6102ed0 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -573,7 +573,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
     if ( (rc = domain_io_init(d)) != 0 )
         goto fail;
 
-    if ( (rc = p2m_alloc_table(d)) != 0 )
+    if ( (rc = p2m_table_init(d)) != 0 )
         goto fail;
 
     switch ( config->gic_version )
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 8bf23ee..7e721f9 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1315,35 +1315,70 @@ void guest_physmap_remove_page(struct domain *d,
                              d->arch.p2m.default_access);
 }
 
-int p2m_alloc_table(struct domain *d)
+static int p2m_alloc_table(struct p2m_domain *p2m)
 {
-    struct p2m_domain *p2m = &d->arch.p2m;
-    struct page_info *page;
+    struct page_info *page = NULL;
     unsigned int i;
 
     page = alloc_domheap_pages(NULL, P2M_ROOT_ORDER, 0);
     if ( page == NULL )
         return -ENOMEM;
 
-    spin_lock(&p2m->lock);
-
-    /* Clear both first level pages */
+    /* Clear all first level pages */
     for ( i = 0; i < P2M_ROOT_PAGES; i++ )
         clear_and_clean_page(page + i);
 
     p2m->root = page;
 
-    d->arch.vttbr = page_to_maddr(p2m->root)
-        | ((uint64_t)p2m->vmid&0xff)<<48;
+    p2m->vttbr.vttbr = 0;
+    p2m->vttbr.vttbr_vmid = p2m->vmid & 0xff;
+    p2m->vttbr.vttbr_baddr = page_to_maddr(p2m->root);
 
-    /* Make sure that all TLBs corresponding to the new VMID are flushed
-     * before using it
+    return 0;
+}
+
+int p2m_table_init(struct domain *d)
+{
+    int i = 0;
+    int rc;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    spin_lock(&p2m->lock);
+
+    rc = p2m_alloc_table(p2m);
+    if ( rc != 0 )
+    {
+        /* Do not return to the caller with the p2m lock still held. */
+        spin_unlock(&p2m->lock);
+        goto out;
+    }
+
+    d->arch.vttbr = d->arch.p2m.vttbr.vttbr;
+
+    /*
+     * Make sure that all TLBs corresponding to the new VMID are flushed
+     * before using it.
      */
     flush_tlb_domain(d);
 
     spin_unlock(&p2m->lock);
 
-    return 0;
+    if ( hvm_altp2m_supported() )
+    {
+        /* Init alternate p2m data */
+        for ( i = 0; i < MAX_ALTP2M; i++ )
+        {
+            d->arch.altp2m_vttbr[i] = INVALID_MFN;
+            rc = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+            if ( rc != 0 )
+                goto out;
+        }
+
+        d->arch.altp2m_active = 0;
+    }
+
+out:
+    return rc;
 }
 
 #define MAX_VMID 256
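
For reviewers: the altp2m loop in p2m_table_init assumes per-domain
altp2m state that this patch does not introduce. The following sketch
only mirrors the usage above (MAX_ALTP2M and the field names are
assumptions modeled on the x86 side, not the actual declarations):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_ALTP2M 10 /* assumed number of altp2m views, as on x86 */

    /* Hypothetical shape of the per-domain altp2m state. */
    struct altp2m_state_sketch {
        struct p2m_domain *altp2m_p2m[MAX_ALTP2M]; /* the altp2m views */
        uint64_t altp2m_vttbr[MAX_ALTP2M]; /* per-view VTTBR; INVALID_MFN while unused */
        bool altp2m_active;                /* set once a view is activated */
    };

Each altp2m view obtains its own root table through the now-static
p2m_alloc_table, so the host p2m and the alternate p2ms share a single
allocation path.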
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 783db5c..451b097 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -171,7 +171,7 @@ int relinquish_p2m_mapping(struct domain *d);
  *
  * Returns 0 for success or -errno.
  */
-int p2m_alloc_table(struct domain *d);
+int p2m_table_init(struct domain *d);
 
 /* Context switch */
 void p2m_save_state(struct vcpu *p);
--
2.8.3