|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH for-4.5 v11 1/9] xen/arm: p2m changes for mem_access support
Add necessary changes for page table construction routines to pass
the default access information. We store the p2m_access_t info in a
Radix tree as the PTE lacks enough software programmable bits.
Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
---
v11: - Move including common/mem_event.h down the series.
v10: - Typo fix and drop reshuffling things that no longer need
shuffling.
v8: - Drop lock inputs as common mem_access_check is postponed.
- Resurrect the radix tree with an extra boolean access_in_use flag
to indicate if the tree is empty to avoid lookups.
v7: - Remove radix tree init/destroy and move p2m_access_t store to page_info.
- Add p2m_gpfn_lock/unlock functions.
- Add bool_t lock input to p2m_lookup and apply_p2m_changes so the caller
can specify if locking should be performed. This is needed in order to
support mem_access_check from common.
v6: - Move mem_event header include to first patch that needs it.
v5: - #include grouping style-fix.
v4: - Move p2m_get_hostp2m definition here.
---
xen/arch/arm/p2m.c | 49 +++++++++++++++++++++++++++--------------
xen/include/asm-arm/domain.h | 1 +
xen/include/asm-arm/p2m.h | 25 ++++++++++++++++++++-
xen/include/asm-arm/processor.h | 2 +-
4 files changed, 59 insertions(+), 18 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 70929fc..0f86088 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -228,7 +228,7 @@ int p2m_pod_decrease_reservation(struct domain *d,
}
static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
- p2m_type_t t)
+ p2m_type_t t, p2m_access_t a)
{
paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
/* sh, xn and write bit will be defined in the following switches
@@ -346,7 +346,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
for ( i=0 ; i < LPAE_ENTRIES; i++ )
{
pte = mfn_to_p2m_entry(base_pfn + (i<<(level_shift-LPAE_SHIFT)),
- MATTR_MEM, t);
+ MATTR_MEM, t, p2m->default_access);
/*
* First and second level super pages set p2m.table = 0, but
@@ -366,7 +366,8 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
unmap_domain_page(p);
- pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid);
+ pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid,
+ p2m->default_access);
p2m_write_pte(entry, pte, flush_cache);
@@ -469,7 +470,8 @@ static int apply_one_level(struct domain *d,
paddr_t *maddr,
bool_t *flush,
int mattr,
- p2m_type_t t)
+ p2m_type_t t,
+ p2m_access_t a)
{
const paddr_t level_size = level_sizes[level];
const paddr_t level_mask = level_masks[level];
@@ -498,7 +500,7 @@ static int apply_one_level(struct domain *d,
page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
if ( page )
{
- pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t);
+ pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
if ( level < 3 )
pte.p2m.table = 0;
p2m_write_pte(entry, pte, flush_cache);
@@ -533,7 +535,7 @@ static int apply_one_level(struct domain *d,
(level == 3 || !p2m_table(orig_pte)) )
{
/* New mapping is superpage aligned, make it */
- pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t);
+ pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
if ( level < 3 )
pte.p2m.table = 0; /* Superpage entry */
@@ -712,7 +714,9 @@ static int apply_p2m_changes(struct domain *d,
paddr_t end_gpaddr,
paddr_t maddr,
int mattr,
- p2m_type_t t)
+ uint32_t mask,
+ p2m_type_t t,
+ p2m_access_t a)
{
int rc, ret;
struct p2m_domain *p2m = &d->arch.p2m;
@@ -805,7 +809,7 @@ static int apply_p2m_changes(struct domain *d,
level, flush_pt, op,
start_gpaddr, end_gpaddr,
&addr, &maddr, &flush,
- mattr, t);
+ mattr, t, a);
if ( ret < 0 ) { rc = ret ; goto out; }
count += ret;
/* L3 had better have done something! We cannot descend any
further */
@@ -863,7 +867,7 @@ out:
*/
apply_p2m_changes(d, REMOVE,
start_gpaddr, addr + level_sizes[level], orig_maddr,
- mattr, p2m_invalid);
+ mattr, 0, p2m_invalid, d->arch.p2m.default_access);
}
for ( level = P2M_ROOT_LEVEL; level < 4; level ++ )
@@ -882,7 +886,8 @@ int p2m_populate_ram(struct domain *d,
paddr_t end)
{
return apply_p2m_changes(d, ALLOCATE, start, end,
- 0, MATTR_MEM, p2m_ram_rw);
+ 0, MATTR_MEM, 0, p2m_ram_rw,
+ d->arch.p2m.default_access);
}
int map_mmio_regions(struct domain *d,
@@ -894,7 +899,8 @@ int map_mmio_regions(struct domain *d,
pfn_to_paddr(start_gfn),
pfn_to_paddr(start_gfn + nr),
pfn_to_paddr(mfn),
- MATTR_DEV, p2m_mmio_direct);
+ MATTR_DEV, 0, p2m_mmio_direct,
+ d->arch.p2m.default_access);
}
int unmap_mmio_regions(struct domain *d,
@@ -906,7 +912,8 @@ int unmap_mmio_regions(struct domain *d,
pfn_to_paddr(start_gfn),
pfn_to_paddr(start_gfn + nr),
pfn_to_paddr(mfn),
- MATTR_DEV, p2m_invalid);
+ MATTR_DEV, 0, p2m_invalid,
+ d->arch.p2m.default_access);
}
int guest_physmap_add_entry(struct domain *d,
@@ -918,7 +925,8 @@ int guest_physmap_add_entry(struct domain *d,
return apply_p2m_changes(d, INSERT,
pfn_to_paddr(gpfn),
pfn_to_paddr(gpfn + (1 << page_order)),
- pfn_to_paddr(mfn), MATTR_MEM, t);
+ pfn_to_paddr(mfn), MATTR_MEM, 0, t,
+ d->arch.p2m.default_access);
}
void guest_physmap_remove_page(struct domain *d,
@@ -928,7 +936,8 @@ void guest_physmap_remove_page(struct domain *d,
apply_p2m_changes(d, REMOVE,
pfn_to_paddr(gpfn),
pfn_to_paddr(gpfn + (1<<page_order)),
- pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
+ pfn_to_paddr(mfn), MATTR_MEM, 0, p2m_invalid,
+ d->arch.p2m.default_access);
}
int arch_grant_map_page_identity(struct domain *d, unsigned long frame,
@@ -1058,6 +1067,8 @@ void p2m_teardown(struct domain *d)
p2m_free_vmid(d);
+ radix_tree_destroy(&p2m->mem_access_settings, NULL);
+
spin_unlock(&p2m->lock);
}
@@ -1083,6 +1094,10 @@ int p2m_init(struct domain *d)
p2m->max_mapped_gfn = 0;
p2m->lowest_mapped_gfn = ULONG_MAX;
+ p2m->default_access = p2m_access_rwx;
+ p2m->access_in_use = false;
+ radix_tree_init(&p2m->mem_access_settings);
+
err:
spin_unlock(&p2m->lock);
@@ -1097,7 +1112,8 @@ int relinquish_p2m_mapping(struct domain *d)
pfn_to_paddr(p2m->lowest_mapped_gfn),
pfn_to_paddr(p2m->max_mapped_gfn),
pfn_to_paddr(INVALID_MFN),
- MATTR_MEM, p2m_invalid);
+ MATTR_MEM, 0, p2m_invalid,
+ d->arch.p2m.default_access);
}
int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
@@ -1111,7 +1127,8 @@ int p2m_cache_flush(struct domain *d, xen_pfn_t
start_mfn, xen_pfn_t end_mfn)
pfn_to_paddr(start_mfn),
pfn_to_paddr(end_mfn),
pfn_to_paddr(INVALID_MFN),
- MATTR_MEM, p2m_invalid);
+ MATTR_MEM, 0, p2m_invalid,
+ d->arch.p2m.default_access);
}
unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 787e93c..3d69152 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -17,6 +17,7 @@ struct hvm_domain
{
uint64_t params[HVM_NR_PARAMS];
struct hvm_iommu iommu;
+ bool_t introspection_enabled;
} __cacheline_aligned;
#ifdef CONFIG_ARM_64
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 10bf111..bda4837 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -2,7 +2,7 @@
#define _XEN_P2M_H
#include <xen/mm.h>
-
+#include <xen/radix-tree.h>
#include <xen/p2m-common.h>
#define paddr_bits PADDR_BITS
@@ -48,6 +48,18 @@ struct p2m_domain {
/* If true, and an access fault comes in and there is no mem_event
listener,
* pause domain. Otherwise, remove access restrictions. */
bool_t access_required;
+
+ /* Defines if mem_access is in use for the domain. */
+ bool_t access_in_use;
+
+ /* Default P2M access type for each page in the domain: new pages,
+ * swapped in pages, cleared pages, and pages that are ambiguously
+ * retyped get this access type. See definition of p2m_access_t. */
+ p2m_access_t default_access;
+
+ /* Radix tree to store the p2m_access_t settings as the pte's don't have
+ * enough available bits to store this information. */
+ struct radix_tree_root mem_access_settings;
};
/* List of possible type for each page in the p2m entry.
@@ -221,6 +233,17 @@ int arch_grant_unmap_page_identity(struct domain *d,
unsigned long frame);
/* get host p2m table */
#define p2m_get_hostp2m(d) (&(d)->arch.p2m)
+/* mem_event and mem_access are supported on any ARM guest */
+static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
+{
+ return 1;
+}
+
+static inline bool_t p2m_mem_event_sanity_check(struct domain *d)
+{
+ return 1;
+}
+
#endif /* _XEN_P2M_H */
/*
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 07a421c..d74b6f4 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -435,7 +435,7 @@ union hsr {
struct hsr_dabt {
unsigned long dfsc:6; /* Data Fault Status Code */
unsigned long write:1; /* Write / not Read */
- unsigned long s1ptw:1; /* */
+ unsigned long s1ptw:1; /* Stage 2 fault during stage 1 translation */
unsigned long cache:1; /* Cache Maintenance */
unsigned long eat:1; /* External Abort Type */
#ifdef CONFIG_ARM_32
--
2.1.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |