|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH V14 3/7] xen/arm: Allow hypervisor access to mem_access protected pages
The hypervisor may use the MMU to verify that the given guest has read/write
access to a given page during hypercalls. As we may have custom mem_access
permissions set on these pages, we do software-based type checking in case
the MMU-based approach failed, but only if mem_access_enabled is set.
These memory accesses are not forwarded to the mem_event listener. Accesses
performed by the hypervisor are currently not part of the mem_access scheme.
This is consistent behaviour with the x86 side as well.
Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
---
v14: - Move software-based lookup into p2m, add comments and clean it up a bit
Only page type allowed is rw
Extend gva_to_ipa to take flags input for lookup validation
v12: - Check for mfn_valid as well.
---
xen/arch/arm/p2m.c | 101 +++++++++++++++++++++++++++++++++++++++
xen/arch/arm/traps.c | 2 +-
xen/include/asm-arm/arm32/page.h | 7 ++-
xen/include/asm-arm/arm64/page.h | 7 ++-
xen/include/asm-arm/page.h | 4 +-
5 files changed, 114 insertions(+), 7 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 137e5a0..692e0c7 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1168,6 +1168,103 @@ unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
return p >> PAGE_SHIFT;
}
+/*
+ * If mem_access is in use it might have been the reason why get_page_from_gva
+ * failed to fetch the page, as it uses the MMU for the permission checking.
+ * Only in these cases we do a software-based type check and fetch the page if
+ * we indeed found a conflicting mem_access setting.
+ */
+static struct page_info*
+p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag)
+{
+ long rc;
+ paddr_t ipa;
+ unsigned long maddr;
+ unsigned long mfn;
+ xenmem_access_t xma;
+ p2m_type_t t;
+ struct page_info *page = NULL;
+
+ rc = gva_to_ipa(gva, &ipa, flag);
+ if ( rc < 0 )
+ goto err;
+
+ /*
+ * We do this first as this is faster in the default case when no
+ * permission is set on the page.
+ */
+ rc = p2m_get_mem_access(current->domain, paddr_to_pfn(ipa), &xma);
+ if ( rc < 0 )
+ goto err;
+
+ /* Let's check if mem_access limited the access. */
+ switch ( xma )
+ {
+ default:
+ case XENMEM_access_rwx:
+ case XENMEM_access_rw:
+ /*
+ * If mem_access contains no rw perm restrictions at all then the original
+ * fault was correct.
+ */
+ goto err;
+ case XENMEM_access_n2rwx:
+ case XENMEM_access_n:
+ case XENMEM_access_x:
+ /*
+ * If no r/w is permitted by mem_access, this was a fault caused by mem_access.
+ */
+ break;
+ case XENMEM_access_wx:
+ case XENMEM_access_w:
+ /*
+ * If this was a read then it was because of mem_access, but if it was
+ * a write then the original get_page_from_gva fault was correct.
+ */
+ if ( flag == GV2M_READ )
+ break;
+ else
+ goto err;
+ case XENMEM_access_rx2rw:
+ case XENMEM_access_rx:
+ case XENMEM_access_r:
+ /*
+ * If this was a write then it was because of mem_access, but if it was
+ * a read then the original get_page_from_gva fault was correct.
+ */
+ if ( flag == GV2M_WRITE )
+ break;
+ else
+ goto err;
+ }
+
+ /*
+ * We had a mem_access permission limiting the access, but the page type
+ * could also be limiting, so we need to check that as well.
+ */
+ maddr = p2m_lookup(current->domain, ipa, &t);
+ if ( maddr == INVALID_PADDR )
+ goto err;
+
+ mfn = maddr >> PAGE_SHIFT;
+ if ( !mfn_valid(mfn) )
+ goto err;
+
+ /*
+ * Base type doesn't allow r/w
+ */
+ if ( t != p2m_ram_rw )
+ goto err;
+
+ page = mfn_to_page(mfn);
+
+ if ( unlikely(!get_page(page, current->domain)) )
+ page = NULL;
+
+err:
+ return page;
+}
+
struct page_info *get_page_from_gva(struct domain *d, vaddr_t va,
unsigned long flags)
{
@@ -1209,6 +1306,10 @@ struct page_info *get_page_from_gva(struct domain *d, vaddr_t va,
err:
spin_unlock(&p2m->lock);
+
+ if ( !page && p2m->mem_access_enabled )
+ page = p2m_mem_access_check_and_get_page(va, flags);
+
return page;
}
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index ad046e8..5d90609 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1988,7 +1988,7 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
if (dabt.s1ptw)
goto bad_data_abort;
- rc = gva_to_ipa(info.gva, &info.gpa);
+ rc = gva_to_ipa(info.gva, &info.gpa, GV2M_READ);
if ( rc == -EFAULT )
goto bad_data_abort;
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index a07e217..bccdbfc 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -103,11 +103,14 @@ static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags)
WRITE_CP64(tmp, PAR);
return par;
}
-static inline uint64_t gva_to_ipa_par(vaddr_t va)
+static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags)
{
uint64_t par, tmp;
tmp = READ_CP64(PAR);
- WRITE_CP32(va, ATS1CPR);
+ if ( (flags & GV2M_WRITE) == GV2M_WRITE )
+ WRITE_CP32(va, ATS1CPW);
+ else
+ WRITE_CP32(va, ATS1CPR);
isb(); /* Ensure result is available. */
par = READ_CP64(PAR);
WRITE_CP64(tmp, PAR);
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index e7a761d..29a32cf 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -98,11 +98,14 @@ static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags)
return par;
}
-static inline uint64_t gva_to_ipa_par(vaddr_t va)
+static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags)
{
uint64_t par, tmp = READ_SYSREG64(PAR_EL1);
- asm volatile ("at s1e1r, %0;" : : "r" (va));
+ if ( (flags & GV2M_WRITE) == GV2M_WRITE )
+ asm volatile ("at s1e1w, %0;" : : "r" (va));
+ else
+ asm volatile ("at s1e1r, %0;" : : "r" (va));
isb();
par = READ_SYSREG64(PAR_EL1);
WRITE_SYSREG64(tmp, PAR_EL1);
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 3e7b0ae..b31e161 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -423,9 +423,9 @@ static inline uint64_t va_to_par(vaddr_t va)
return par;
}
-static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr)
+static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr, unsigned int flags)
{
- uint64_t par = gva_to_ipa_par(va);
+ uint64_t par = gva_to_ipa_par(va, flags);
if ( par & PAR_F )
return -EFAULT;
*paddr = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK);
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |