
[Xen-devel] [PATCH v5 09/17] xen/arm: p2m type definitions and changes



Define p2m_access_t on ARM and extend the page table construction
routines to pass the default access information. Also define the radix
tree that will hold the per-page access permission settings, as the
PTEs don't have enough software-programmable bits available.

Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
---
v5: #include grouping style-fix.
v4: move p2m_get_hostp2m definition here.
---
 xen/arch/arm/p2m.c        | 54 ++++++++++++++++++----------
 xen/include/asm-arm/p2m.h | 92 ++++++++++++++++++++++++++++++++++++-----------
 2 files changed, 108 insertions(+), 38 deletions(-)
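
As a sketch of how the new mem_access_settings tree is meant to be
consumed (the actual lookup/store helpers arrive in later patches of
this series), a per-gfn query could fall back to the p2m-wide default
whenever no explicit setting has been stored. This assumes the existing
radix_tree_lookup() and radix_tree_ptr_to_int() helpers from
xen/radix-tree.h; the function name below is illustrative only, not
part of this patch:

    static p2m_access_t p2m_get_access_sketch(struct p2m_domain *p2m,
                                              unsigned long gfn)
    {
        /* Settings are stored as small integers packed into pointer values. */
        void *ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn);

        /* No per-page entry recorded: the page uses the p2m-wide default. */
        if ( ptr == NULL )
            return p2m->default_access;

        return radix_tree_ptr_to_int(ptr);
    }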

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index fc8c0dd..e6b4bb6 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -5,6 +5,9 @@
 #include <xen/errno.h>
 #include <xen/domain_page.h>
 #include <xen/bitops.h>
+#include <xen/mem_event.h>
+#include <xen/mem_access.h>
+#include <public/mem_event.h>
 #include <asm/flushtlb.h>
 #include <asm/gic.h>
 #include <asm/event.h>
@@ -229,7 +232,7 @@ int p2m_pod_decrease_reservation(struct domain *d,
 }
 
 static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
-                               p2m_type_t t)
+                               p2m_type_t t, p2m_access_t a)
 {
     paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
     /* sh, xn and write bit will be defined in the following switches
@@ -347,7 +350,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
          for ( i=0 ; i < LPAE_ENTRIES; i++ )
          {
              pte = mfn_to_p2m_entry(base_pfn + (i<<(level_shift-LPAE_SHIFT)),
-                                    MATTR_MEM, t);
+                                    MATTR_MEM, t, p2m->default_access);
 
              /*
               * First and second level super pages set p2m.table = 0, but
@@ -367,7 +370,8 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
 
     unmap_domain_page(p);
 
-    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid);
+    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid,
+                           p2m->default_access);
 
     p2m_write_pte(entry, pte, flush_cache);
 
@@ -470,7 +474,8 @@ static int apply_one_level(struct domain *d,
                            paddr_t *maddr,
                            bool_t *flush,
                            int mattr,
-                           p2m_type_t t)
+                           p2m_type_t t,
+                           p2m_access_t a)
 {
     const paddr_t level_size = level_sizes[level];
     const paddr_t level_mask = level_masks[level];
@@ -499,7 +504,7 @@ static int apply_one_level(struct domain *d,
             page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
             if ( page )
             {
-                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t);
+                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
                 if ( level < 3 )
                     pte.p2m.table = 0;
                 p2m_write_pte(entry, pte, flush_cache);
@@ -534,7 +539,7 @@ static int apply_one_level(struct domain *d,
              (level == 3 || !p2m_table(orig_pte)) )
         {
             /* New mapping is superpage aligned, make it */
-            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t);
+            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
             if ( level < 3 )
                 pte.p2m.table = 0; /* Superpage entry */
 
@@ -713,7 +718,8 @@ static int apply_p2m_changes(struct domain *d,
                      paddr_t end_gpaddr,
                      paddr_t maddr,
                      int mattr,
-                     p2m_type_t t)
+                     p2m_type_t t,
+                     p2m_access_t a)
 {
     int rc, ret;
     struct p2m_domain *p2m = &d->arch.p2m;
@@ -780,7 +786,7 @@ static int apply_p2m_changes(struct domain *d,
                               level, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         count += ret;
         if ( ret != P2M_ONE_DESCEND ) continue;
@@ -802,7 +808,7 @@ static int apply_p2m_changes(struct domain *d,
                               level, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         count += ret;
         if ( ret != P2M_ONE_DESCEND ) continue;
@@ -822,7 +828,7 @@ static int apply_p2m_changes(struct domain *d,
                               level, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         /* L3 had better have done something! We cannot descend any further */
         BUG_ON(ret == P2M_ONE_DESCEND);
@@ -865,7 +871,7 @@ out:
          */
         apply_p2m_changes(d, REMOVE,
                           start_gpaddr, addr + level_sizes[level], orig_maddr,
-                          mattr, p2m_invalid);
+                          mattr, p2m_invalid, d->arch.p2m.default_access);
     }
 
     spin_unlock(&p2m->lock);
@@ -878,7 +884,8 @@ int p2m_populate_ram(struct domain *d,
                      paddr_t end)
 {
     return apply_p2m_changes(d, ALLOCATE, start, end,
-                             0, MATTR_MEM, p2m_ram_rw);
+                             0, MATTR_MEM, p2m_ram_rw,
+                             d->arch.p2m.default_access);
 }
 
 int map_mmio_regions(struct domain *d,
@@ -890,7 +897,8 @@ int map_mmio_regions(struct domain *d,
                              pfn_to_paddr(start_gfn),
                              pfn_to_paddr(start_gfn + nr),
                              pfn_to_paddr(mfn),
-                             MATTR_DEV, p2m_mmio_direct);
+                             MATTR_DEV, p2m_mmio_direct,
+                             d->arch.p2m.default_access);
 }
 
 int unmap_mmio_regions(struct domain *d,
@@ -902,7 +910,8 @@ int unmap_mmio_regions(struct domain *d,
                              pfn_to_paddr(start_gfn),
                              pfn_to_paddr(start_gfn + nr),
                              pfn_to_paddr(mfn),
-                             MATTR_DEV, p2m_invalid);
+                             MATTR_DEV, p2m_invalid,
+                             d->arch.p2m.default_access);
 }
 
 int guest_physmap_add_entry(struct domain *d,
@@ -914,7 +923,8 @@ int guest_physmap_add_entry(struct domain *d,
     return apply_p2m_changes(d, INSERT,
                              pfn_to_paddr(gpfn),
                              pfn_to_paddr(gpfn + (1 << page_order)),
-                             pfn_to_paddr(mfn), MATTR_MEM, t);
+                             pfn_to_paddr(mfn), MATTR_MEM, t,
+                             d->arch.p2m.default_access);
 }
 
 void guest_physmap_remove_page(struct domain *d,
@@ -924,7 +934,8 @@ void guest_physmap_remove_page(struct domain *d,
     apply_p2m_changes(d, REMOVE,
                       pfn_to_paddr(gpfn),
                       pfn_to_paddr(gpfn + (1<<page_order)),
-                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
+                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid,
+                      d->arch.p2m.default_access);
 }
 
 int p2m_alloc_table(struct domain *d)
@@ -1027,6 +1038,8 @@ void p2m_teardown(struct domain *d)
 
     p2m_free_vmid(d);
 
+    radix_tree_destroy(&p2m->mem_access_settings, NULL);
+
     spin_unlock(&p2m->lock);
 }
 
@@ -1052,6 +1065,9 @@ int p2m_init(struct domain *d)
     p2m->max_mapped_gfn = 0;
     p2m->lowest_mapped_gfn = ULONG_MAX;
 
+    p2m->default_access = p2m_access_rwx;
+    radix_tree_init(&p2m->mem_access_settings);
+
 err:
     spin_unlock(&p2m->lock);
 
@@ -1066,7 +1082,8 @@ int relinquish_p2m_mapping(struct domain *d)
                               pfn_to_paddr(p2m->lowest_mapped_gfn),
                               pfn_to_paddr(p2m->max_mapped_gfn),
                               pfn_to_paddr(INVALID_MFN),
-                              MATTR_MEM, p2m_invalid);
+                              MATTR_MEM, p2m_invalid,
+                              d->arch.p2m.default_access);
 }
 
 int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
@@ -1080,7 +1097,8 @@ int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
                              pfn_to_paddr(start_mfn),
                              pfn_to_paddr(end_mfn),
                              pfn_to_paddr(INVALID_MFN),
-                             MATTR_MEM, p2m_invalid);
+                             MATTR_MEM, p2m_invalid,
+                             d->arch.p2m.default_access);
 }
 
 unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 08ce07b..b4ca86d 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -2,6 +2,9 @@
 #define _XEN_P2M_H
 
 #include <xen/mm.h>
+#include <xen/radix-tree.h>
+#include <public/memory.h>
+#include <public/mem_event.h>
 
 #include <xen/p2m-common.h>
 
@@ -11,6 +14,48 @@ struct domain;
 
 extern void memory_type_changed(struct domain *);
 
+/* List of possible type for each page in the p2m entry.
+ * The number of available bit per page in the pte for this purpose is 4 bits.
+ * So it's possible to only have 16 fields. If we run out of value in the
+ * future, it's possible to use higher value for pseudo-type and don't store
+ * them in the p2m entry.
+ */
+typedef enum {
+    p2m_invalid = 0,    /* Nothing mapped here */
+    p2m_ram_rw,         /* Normal read/write guest RAM */
+    p2m_ram_ro,         /* Read-only; writes are silently dropped */
+    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
+    p2m_map_foreign,    /* Ram pages from foreign domain */
+    p2m_grant_map_rw,   /* Read/write grant mapping */
+    p2m_grant_map_ro,   /* Read-only grant mapping */
+    /* The types below are only used to decide the page attribute in the P2M */
+    p2m_iommu_map_rw,   /* Read/write iommu mapping */
+    p2m_iommu_map_ro,   /* Read-only iommu mapping */
+    p2m_max_real_type,  /* Types after this won't be store in the p2m */
+} p2m_type_t;
+
+/*
+ * Additional access types, which are used to further restrict
+ * the permissions given by the p2m_type_t memory type. Violations
+ * caused by p2m_access_t restrictions are sent to the mem_event
+ * interface.
+ *
+ * The access permissions are soft state: when any ambiguous change of page
+ * type or use occurs, or when pages are flushed, swapped, or at any other
+ * convenient time, the access permissions can get reset to the p2m_domain
+ * default.
+ */
+typedef enum {
+    p2m_access_n    = 0, /* No access permissions allowed */
+    p2m_access_r    = 1,
+    p2m_access_w    = 2,
+    p2m_access_rw   = 3,
+    p2m_access_x    = 4,
+    p2m_access_rx   = 5,
+    p2m_access_wx   = 6,
+    p2m_access_rwx  = 7
+} p2m_access_t;
+
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -44,27 +89,20 @@ struct p2m_domain {
          * at each p2m tree level. */
         unsigned long shattered[4];
     } stats;
-};
 
-/* List of possible type for each page in the p2m entry.
- * The number of available bit per page in the pte for this purpose is 4 bits.
- * So it's possible to only have 16 fields. If we run out of value in the
- * future, it's possible to use higher value for pseudo-type and don't store
- * them in the p2m entry.
- */
-typedef enum {
-    p2m_invalid = 0,    /* Nothing mapped here */
-    p2m_ram_rw,         /* Normal read/write guest RAM */
-    p2m_ram_ro,         /* Read-only; writes are silently dropped */
-    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
-    p2m_map_foreign,    /* Ram pages from foreign domain */
-    p2m_grant_map_rw,   /* Read/write grant mapping */
-    p2m_grant_map_ro,   /* Read-only grant mapping */
-    /* The types below are only used to decide the page attribute in the P2M */
-    p2m_iommu_map_rw,   /* Read/write iommu mapping */
-    p2m_iommu_map_ro,   /* Read-only iommu mapping */
-    p2m_max_real_type,  /* Types after this won't be store in the p2m */
-} p2m_type_t;
+    /* Default P2M access type for each page in the domain: new pages,
+     * swapped in pages, cleared pages, and pages that are ambiguously
+     * retyped get this access type. See definition of p2m_access_t. */
+    p2m_access_t default_access;
+
+    /* If true, and an access fault comes in and there is no mem_event listener,
+     * pause domain. Otherwise, remove access restrictions. */
+    bool_t access_required;
+
+    /* Radix tree to store the p2m_access_t settings as the pte's don't have
+     * enough available bits to store this information. */
+    struct radix_tree_root mem_access_settings;
+};
 
 #define p2m_is_foreign(_t)  ((_t) == p2m_map_foreign)
 #define p2m_is_ram(_t)      ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro)
@@ -194,6 +232,20 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+/* get host p2m table */
+#define p2m_get_hostp2m(d) (&((d)->arch.p2m))
+
+/* mem_event and mem_access are supported on all ARM */
+static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
+{
+    return 1;
+}
+
+static inline bool_t p2m_mem_event_sanity_check(struct domain *d)
+{
+    return 1;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
-- 
2.1.0
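
A note on the p2m_access_t enum added to asm-arm/p2m.h above: the eight
values form an r/w/x bitfield (r == 1, w == 2, x == 4), so every
combined value is the bitwise OR of its parts and individual
permissions can be tested with a mask. A minimal illustration, not part
of the patch (the helper names are hypothetical, and the property would
not hold for extra pseudo-access types such as x86's p2m_access_rx2rw):

    /* Test individual permissions by mask; valid for the eight values
     * defined in this patch. */
    static inline bool_t access_allows_write(p2m_access_t a)
    {
        return (a & p2m_access_w) != 0;  /* true for w, rw, wx and rwx */
    }

    static inline bool_t access_allows_exec(p2m_access_t a)
    {
        return (a & p2m_access_x) != 0;  /* true for x, rx, wx and rwx */
    }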

