
[Xen-devel] [PATCH v3 8/9] libxc: rework of domain builder's page table handler



In order to prepare a p2m list outside of the initial kernel mapping,
rework the domain builder's page table handler. The goal is to be able
to use common helpers for page table allocation and setup, both for
the initial kernel page tables and for the page tables mapping the
p2m list. This is achieved by supporting multiple mapping areas. The
virtual addresses mapped by the individual areas must not overlap,
while the page tables of a newly added area might already be partially
present; in particular, the top-level page table exists only once, of
course.
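
As an illustration of the bookkeeping this introduces (a standalone
sketch, not part of the patch): nr_page_tables() clips a new area's
address range, per page table level, against the ranges already
covered by earlier mappings, so shared tables are counted and
allocated only once. All names below are purely illustrative:

/*
 * Minimal sketch of the per-level range trimming: clip a new
 * [from, to] range against ranges already covered at the same page
 * table level, so shared page tables (e.g. the top level one) are
 * not counted twice.
 */
#include <inttypes.h>
#include <stdio.h>

typedef uint64_t vaddr_t;

struct range { vaddr_t from, to; };  /* inclusive, already rounded */

/* Clip [*from, *to] against cov[0..n-1]; returns 0 if fully covered. */
static int trim_range(vaddr_t *from, vaddr_t *to,
                      const struct range *cov, unsigned int n)
{
    unsigned int i;

    for ( i = 0; i < n; i++ )
    {
        if ( cov[i].from == cov[i].to )
            continue;                   /* level unused in that mapping */
        if ( *from >= cov[i].from && *to <= cov[i].to )
            return 0;                   /* nothing new to allocate */
        if ( *from >= cov[i].from && *from <= cov[i].to )
            *from = cov[i].to + 1;      /* overlap at the start */
        if ( *to >= cov[i].from && *to <= cov[i].to )
            *to = cov[i].from - 1;      /* overlap at the end */
    }
    return 1;
}

int main(void)
{
    /* One earlier mapping already covers 0x0-0x3fffff on this level. */
    struct range cov[] = { { 0x00000000, 0x003fffff } };
    vaddr_t from = 0x00200000, to = 0x005fffff;
    unsigned long bits = 21;            /* bits covered per table here */

    if ( trim_range(&from, &to, cov, 1) )
        printf("%" PRIu64 " new table(s) for 0x%" PRIx64 "-0x%" PRIx64 "\n",
               ((to - from) >> bits) + 1, from, to);
    else
        printf("range already fully covered\n");

    return 0;
}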

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 tools/libxc/xc_dom_x86.c | 404 ++++++++++++++++++++++++++++-------------------
 1 file changed, 240 insertions(+), 164 deletions(-)

diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index c815e10..333ef6b 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -65,17 +65,27 @@
 #define NR_IOREQ_SERVER_PAGES 8
 #define ioreq_server_pfn(x) (special_pfn(0) - NR_IOREQ_SERVER_PAGES + (x))
 
-#define bits_to_mask(bits)       (((xen_vaddr_t)1 << (bits))-1)
+#define bits_to_mask(bits)       (((xen_vaddr_t)1 << (bits)) - 1)
 #define round_down(addr, mask)   ((addr) & ~(mask))
 #define round_up(addr, mask)     ((addr) | (mask))
 
-struct xc_dom_image_x86 {
-    /* initial page tables */
+struct xc_dom_x86_mapping_lvl {
+    xen_vaddr_t from;
+    xen_vaddr_t to;
+    xen_pfn_t pfn;
     unsigned int pgtables;
-    unsigned int pg_l4;
-    unsigned int pg_l3;
-    unsigned int pg_l2;
-    unsigned int pg_l1;
+};
+
+struct xc_dom_x86_mapping {
+    struct xc_dom_x86_mapping_lvl area;
+    struct xc_dom_x86_mapping_lvl lvls[4];
+    xen_pfn_t pfn_start;
+};
+
+struct xc_dom_image_x86 {
+    unsigned n_mappings;
+#define MAPPING_MAX 1
+    struct xc_dom_x86_mapping maps[MAPPING_MAX];
 };
 
 /* get guest IO ABI protocol */
@@ -105,43 +115,107 @@ const char *xc_domain_get_native_protocol(xc_interface *xch,
     return protocol;
 }
 
-static unsigned long
-nr_page_tables(struct xc_dom_image *dom,
-               xen_vaddr_t start, xen_vaddr_t end, unsigned long bits)
+static void
+nr_page_tables(struct xc_dom_image *dom, int lvl,
+               xen_vaddr_t from, xen_vaddr_t to, unsigned long bits)
 {
     xen_vaddr_t mask = bits_to_mask(bits);
-    int tables;
+    struct xc_dom_image_x86 *domx86 = dom->arch_private;
+    struct xc_dom_x86_mapping *map = domx86->maps + domx86->n_mappings;
+    struct xc_dom_x86_mapping *map_cmp;
+    unsigned map_n;
 
     if ( bits == 0 )
-        return 0;  /* unused */
+        return;  /* unused */
+
+    if ( lvl == 3 && domx86->n_mappings != 0 )
+        return;  /* Top level page table already in first mapping. */
 
     if ( bits == (8 * sizeof(unsigned long)) )
     {
-        /* must be pgd, need one */
-        start = 0;
-        end = -1;
-        tables = 1;
+        /* 32 bit top level page table special case */
+        map->lvls[lvl].from = 0;
+        map->lvls[lvl].to = -1;
+        map->lvls[lvl].pgtables = 1;
+        goto done;
     }
-    else
+
+    from = round_down(from, mask);
+    to = round_up(to, mask);
+
+    for ( map_n = 0, map_cmp = domx86->maps; map_n < domx86->n_mappings;
+          map_n++, map_cmp++ )
     {
-        start = round_down(start, mask);
-        end = round_up(end, mask);
-        tables = ((end - start) >> bits) + 1;
+        if ( map_cmp->lvls[lvl].from == map_cmp->lvls[lvl].to )
+            continue;
+        if ( from >= map_cmp->lvls[lvl].from && to <= map_cmp->lvls[lvl].to )
+            return;  /* Area already completely covered on this level. */
+        if ( from >= map_cmp->lvls[lvl].from && from <= map_cmp->lvls[lvl].to )
+            from = map_cmp->lvls[lvl].to + 1;
+        if ( to >= map_cmp->lvls[lvl].from && to <= map_cmp->lvls[lvl].to )
+            to = map_cmp->lvls[lvl].from - 1;
     }
 
+    map->lvls[lvl].from = from;
+    map->lvls[lvl].to = to;
+    map->lvls[lvl].pgtables = ((to - from) >> bits) + 1;
+
+ done:
     DOMPRINTF("%s: 0x%016" PRIx64 "/%ld: 0x%016" PRIx64
-              " -> 0x%016" PRIx64 ", %d table(s)",
-              __FUNCTION__, mask, bits, start, end, tables);
-    return tables;
+              " -> 0x%016" PRIx64 ", %d table(s)", __FUNCTION__, mask, bits,
+              map->lvls[lvl].from, map->lvls[lvl].to, map->lvls[lvl].pgtables);
 }
 
-static int alloc_pgtables(struct xc_dom_image *dom, int pae,
-                          int l4_bits, int l3_bits, int l2_bits, int l1_bits)
+static int count_pgtables(struct xc_dom_image *dom, int pae, int bits[4],
+                          xen_vaddr_t from, xen_vaddr_t to, xen_pfn_t pfn)
+{
+    struct xc_dom_image_x86 *domx86 = dom->arch_private;
+    struct xc_dom_x86_mapping *map;
+    xen_pfn_t pfn_end;
+    int level;
+
+    if ( domx86->n_mappings == MAPPING_MAX )
+    {
+        xc_dom_panic(dom->xch, XC_OUT_OF_MEMORY,
+                     "%s: too many mappings\n", __FUNCTION__);
+        return -ENOMEM;
+    }
+    map = domx86->maps + domx86->n_mappings;
+
+    pfn_end = pfn + ((to - from) >> PAGE_SHIFT_X86);
+    if ( pfn_end >= dom->p2m_size )
+    {
+        xc_dom_panic(dom->xch, XC_OUT_OF_MEMORY,
+                     "%s: not enough memory for initial mapping (%#"PRIpfn" > 
%#"PRIpfn")",
+                     __FUNCTION__, pfn_end, dom->p2m_size);
+        return -ENOMEM;
+    }
+
+    memset(map, 0, sizeof(*map));
+
+    map->area.from = from;
+    map->area.to = to;
+    for ( level = 3; level >= 0; level-- )
+    {
+        map->lvls[level].pfn = pfn + map->area.pgtables;
+        nr_page_tables(dom, level, from, to, bits[level]);
+        if ( pae && to < 0xc0000000 && level == 1)
+        {
+            DOMPRINTF("%s: PAE: extra l2 page table for l3#3", __FUNCTION__);
+            map->lvls[level].pgtables++;
+        }
+        map->area.pgtables += map->lvls[level].pgtables;
+    }
+
+    return 0;
+}
+
+static int alloc_pgtables(struct xc_dom_image *dom, int pae, int bits[4])
 {
     int pages, extra_pages;
     xen_vaddr_t try_virt_end;
-    xen_pfn_t try_pfn_end;
     struct xc_dom_image_x86 *domx86 = dom->arch_private;
+    struct xc_dom_x86_mapping *map = domx86->maps + domx86->n_mappings;
 
     extra_pages = dom->alloc_bootstack ? 1 : 0;
     extra_pages += 128; /* 512kB padding */
@@ -151,40 +225,20 @@ static int alloc_pgtables(struct xc_dom_image *dom, int pae,
         try_virt_end = round_up(dom->virt_alloc_end + pages * PAGE_SIZE_X86,
                                 bits_to_mask(22)); /* 4MB alignment */
 
-        try_pfn_end = (try_virt_end - dom->parms.virt_base) >> PAGE_SHIFT_X86;
-
-        if ( try_pfn_end > dom->p2m_size )
-        {
-            xc_dom_panic(dom->xch, XC_OUT_OF_MEMORY,
-                         "%s: not enough memory for initial mapping 
(%#"PRIpfn" > %#"PRIpfn")",
-                         __FUNCTION__, try_pfn_end, dom->p2m_size);
-            return -ENOMEM;
-        }
+        if ( count_pgtables(dom, pae, bits, dom->parms.virt_base, try_virt_end,
+                            dom->pfn_alloc_end) )
+            return -1;
 
-        domx86->pg_l4 =
-            nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l4_bits);
-        domx86->pg_l3 =
-            nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l3_bits);
-        domx86->pg_l2 =
-            nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l2_bits);
-        domx86->pg_l1 =
-            nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l1_bits);
-        if (pae && try_virt_end < 0xc0000000)
-        {
-            DOMPRINTF("%s: PAE: extra l2 page table for l3#3",
-                      __FUNCTION__);
-            domx86->pg_l2++;
-        }
-        domx86->pgtables = domx86->pg_l4 + domx86->pg_l3 +
-                           domx86->pg_l2 + domx86->pg_l1;
-        pages = domx86->pgtables + extra_pages;
+        pages = map->area.pgtables + extra_pages;
         if ( dom->virt_alloc_end + pages * PAGE_SIZE_X86 <= try_virt_end + 1 )
             break;
     }
+    map->area.pfn = 0;
+    domx86->n_mappings++;
     dom->virt_pgtab_end = try_virt_end + 1;
 
     return xc_dom_alloc_segment(dom, &dom->pgtables_seg, "page tables", 0,
-                                domx86->pgtables * PAGE_SIZE_X86);
+                                map->area.pgtables * PAGE_SIZE_X86);
 }
 
 /* ------------------------------------------------------------------------ */
@@ -194,13 +248,17 @@ static int alloc_pgtables(struct xc_dom_image *dom, int pae,
 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
 #define L3_PROT (_PAGE_PRESENT)
 
+static int pgtblshift_x86_32_pae[4] =
+    /* Top level page table must be index 3! */
+    { L2_PAGETABLE_SHIFT_PAE, L3_PAGETABLE_SHIFT_PAE, 0, 32 };
+
 static int alloc_pgtables_x86_32_pae(struct xc_dom_image *dom)
 {
-    return alloc_pgtables(dom, 1, 0, 32,
-                          L3_PAGETABLE_SHIFT_PAE, L2_PAGETABLE_SHIFT_PAE);
+    return alloc_pgtables(dom, 1, pgtblshift_x86_32_pae);
 }
 
 #define pfn_to_paddr(pfn) ((xen_paddr_t)(pfn) << PAGE_SHIFT_X86)
+#define pgentry_to_pfn(entry) ((xen_pfn_t)((entry) >> PAGE_SHIFT_X86))
 
 /*
  * Move the l3 page table page below 4G for guests which do not
@@ -273,17 +331,16 @@ static xen_pfn_t move_l3_below_4G(struct xc_dom_image *dom,
 static int setup_pgtables_x86_32_pae(struct xc_dom_image *dom)
 {
     struct xc_dom_image_x86 *domx86 = dom->arch_private;
-    xen_pfn_t l3pfn = dom->pgtables_seg.pfn;
-    xen_pfn_t l2pfn = l3pfn + domx86->pg_l3;
-    xen_pfn_t l1pfn = l2pfn + domx86->pg_l2;
-    l3_pgentry_64_t *l3tab;
-    l2_pgentry_64_t *l2tab = NULL;
-    l1_pgentry_64_t *l1tab = NULL;
-    unsigned long l3off, l2off = 0, l1off;
+    struct xc_dom_x86_mapping *map;
+    xen_pfn_t l3mfn, l3pfn, l2pfn, l1pfn;
+    l3_pgentry_64_t *l3tab, *l2tab, *l1tab;
+    unsigned long l3off, l2off, l1off;
     xen_vaddr_t addr;
+    unsigned mapping;
     xen_pfn_t pgpfn;
-    xen_pfn_t l3mfn = xc_dom_p2m(dom, l3pfn);
 
+    l3pfn = domx86->maps[0].lvls[3].pfn;
+    l3mfn = xc_dom_p2m(dom, l3pfn);
     if ( dom->parms.pae == XEN_PAE_YES )
     {
         if ( l3mfn >= 0x100000 )
@@ -302,55 +359,64 @@ static int setup_pgtables_x86_32_pae(struct xc_dom_image *dom)
     l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
     if ( l3tab == NULL )
         goto pfn_error;
-
-    for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
-          addr += PAGE_SIZE_X86 )
+    for ( mapping = 0; mapping < domx86->n_mappings; mapping++ )
     {
-        if ( l2tab == NULL )
+        map = domx86->maps + mapping;
+        l2tab = NULL;
+        l1tab = NULL;
+        l2off = 0;
+
+        for ( addr = map->area.from; addr < map->area.to;
+              addr += PAGE_SIZE_X86 )
         {
-            /* get L2 tab, make L3 entry */
-            l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
             if ( l2tab == NULL )
-                goto pfn_error;
-            l3off = l3_table_offset_pae(addr);
-            l3tab[l3off] =
-                pfn_to_paddr(xc_dom_p2m(dom, l2pfn)) | L3_PROT;
-            l2pfn++;
-        }
+            {
+                /* get L2 tab, make L3 entry */
+                l3off = l3_table_offset_pae(addr);
+                pgpfn = (addr - map->lvls[1].from) >> L3_PAGETABLE_SHIFT_PAE;
+                l2pfn = l3tab[l3off] ? pgentry_to_pfn(l3tab[l3off]) :
+                        map->lvls[1].pfn + pgpfn;
+                l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
+                if ( l2tab == NULL )
+                    goto pfn_error;
+                l3tab[l3off] = pfn_to_paddr(xc_dom_p2m(dom, l2pfn)) | L3_PROT;
+            }
 
-        if ( l1tab == NULL )
-        {
-            /* get L1 tab, make L2 entry */
-            l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
             if ( l1tab == NULL )
-                goto pfn_error;
-            l2off = l2_table_offset_pae(addr);
-            l2tab[l2off] =
-                pfn_to_paddr(xc_dom_p2m(dom, l1pfn)) | L2_PROT;
-            l1pfn++;
-        }
+            {
+                /* get L1 tab, make L2 entry */
+                l2off = l2_table_offset_pae(addr);
+                pgpfn = (addr - map->lvls[0].from) >> L2_PAGETABLE_SHIFT_PAE;
+                l1pfn = l2tab[l2off] ? pgentry_to_pfn(l2tab[l2off]) :
+                        map->lvls[0].pfn + pgpfn;
+                l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
+                if ( l1tab == NULL )
+                    goto pfn_error;
+                l2tab[l2off] = pfn_to_paddr(xc_dom_p2m(dom, l1pfn)) | L2_PROT;
+            }
 
-        /* make L1 entry */
-        l1off = l1_table_offset_pae(addr);
-        pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
-        l1tab[l1off] =
-            pfn_to_paddr(xc_dom_p2m(dom, pgpfn)) | L1_PROT;
-        if ( (!dom->pvh_enabled)                &&
-             (addr >= dom->pgtables_seg.vstart) &&
-             (addr < dom->pgtables_seg.vend) )
-            l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
-
-        if ( l1off == (L1_PAGETABLE_ENTRIES_PAE - 1) )
-        {
-            l1tab = NULL;
-            if ( l2off == (L2_PAGETABLE_ENTRIES_PAE - 1) )
-                l2tab = NULL;
+            /* make L1 entry */
+            l1off = l1_table_offset_pae(addr);
+            pgpfn = ((addr - map->area.from) >> PAGE_SHIFT_X86) + map->area.pfn;
+            l1tab[l1off] = pfn_to_paddr(xc_dom_p2m(dom, pgpfn)) | L1_PROT;
+            if ( (!dom->pvh_enabled)            &&
+                 (pgpfn >= map->lvls[3].pfn)    &&
+                 (pgpfn < map->lvls[3].pfn + map->area.pgtables) )
+                l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
+
+            if ( l1off == (L1_PAGETABLE_ENTRIES_PAE - 1) )
+            {
+                l1tab = NULL;
+                if ( l2off == (L2_PAGETABLE_ENTRIES_PAE - 1) )
+                    l2tab = NULL;
+            }
         }
     }
 
     if ( dom->virt_pgtab_end <= 0xc0000000 )
     {
         DOMPRINTF("%s: PAE: extra l2 page table for l3#3", __FUNCTION__);
+        l2pfn = domx86->maps[0].lvls[0].pfn - 1;
         l3tab[3] = pfn_to_paddr(xc_dom_p2m(dom, l2pfn)) | L3_PROT;
     }
     return 0;
@@ -368,13 +434,13 @@ pfn_error:
 /* ------------------------------------------------------------------------ */
 /* x86_64 pagetables                                                        */
 
+static int pgtblshift_x86_64[4] =
+    { L2_PAGETABLE_SHIFT_X86_64, L3_PAGETABLE_SHIFT_X86_64,
+      L4_PAGETABLE_SHIFT_X86_64, L4_PAGETABLE_SHIFT_X86_64 + 9 };
+
 static int alloc_pgtables_x86_64(struct xc_dom_image *dom)
 {
-    return alloc_pgtables(dom, 0,
-                          L4_PAGETABLE_SHIFT_X86_64 + 9,
-                          L4_PAGETABLE_SHIFT_X86_64,
-                          L3_PAGETABLE_SHIFT_X86_64,
-                          L2_PAGETABLE_SHIFT_X86_64);
+    return alloc_pgtables(dom, 0, pgtblshift_x86_64);
 }
 
 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
@@ -385,78 +451,88 @@ static int alloc_pgtables_x86_64(struct xc_dom_image *dom)
 static int setup_pgtables_x86_64(struct xc_dom_image *dom)
 {
     struct xc_dom_image_x86 *domx86 = dom->arch_private;
-    xen_pfn_t l4pfn = dom->pgtables_seg.pfn;
-    xen_pfn_t l3pfn = l4pfn + domx86->pg_l4;
-    xen_pfn_t l2pfn = l3pfn + domx86->pg_l3;
-    xen_pfn_t l1pfn = l2pfn + domx86->pg_l2;
-    l4_pgentry_64_t *l4tab = xc_dom_pfn_to_ptr(dom, l4pfn, 1);
-    l3_pgentry_64_t *l3tab = NULL;
-    l2_pgentry_64_t *l2tab = NULL;
-    l1_pgentry_64_t *l1tab = NULL;
-    uint64_t l4off, l3off = 0, l2off = 0, l1off;
+    struct xc_dom_x86_mapping *map;
+    xen_pfn_t l4pfn, l3pfn, l2pfn, l1pfn;
+    l4_pgentry_64_t *l4tab, *l3tab, *l2tab, *l1tab;
+    uint64_t l4off, l3off, l2off, l1off;
     uint64_t addr;
+    unsigned mapping;
     xen_pfn_t pgpfn;
 
+    l4pfn = domx86->maps[0].lvls[3].pfn;
+    l4tab = xc_dom_pfn_to_ptr(dom, l4pfn, 1);
     if ( l4tab == NULL )
         goto pfn_error;
 
-    for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
-          addr += PAGE_SIZE_X86 )
+    for ( mapping = 0; mapping < domx86->n_mappings; mapping++ )
     {
-        if ( l3tab == NULL )
+        map = domx86->maps + mapping;
+        l3tab = NULL;
+        l2tab = NULL;
+        l1tab = NULL;
+        l3off = 0;
+        l2off = 0;
+
+        for ( addr = map->area.from; addr < map->area.to;
+              addr += PAGE_SIZE_X86 )
         {
-            /* get L3 tab, make L4 entry */
-            l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
             if ( l3tab == NULL )
-                goto pfn_error;
-            l4off = l4_table_offset_x86_64(addr);
-            l4tab[l4off] =
-                pfn_to_paddr(xc_dom_p2m(dom, l3pfn)) | L4_PROT;
-            l3pfn++;
-        }
+            {
+                /* get L3 tab, make L4 entry */
+                l4off = l4_table_offset_x86_64(addr);
+                pgpfn = (addr - map->lvls[2].from) >> 
L4_PAGETABLE_SHIFT_X86_64;
+                l3pfn = l4tab[l4off] ? pgentry_to_pfn(l4tab[l4off]) :
+                        map->lvls[2].pfn + pgpfn;
+                l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
+                if ( l3tab == NULL )
+                    goto pfn_error;
+                l4tab[l4off] = pfn_to_paddr(xc_dom_p2m(dom, l3pfn)) | L4_PROT;
+            }
 
-        if ( l2tab == NULL )
-        {
-            /* get L2 tab, make L3 entry */
-            l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
             if ( l2tab == NULL )
-                goto pfn_error;
-            l3off = l3_table_offset_x86_64(addr);
-            l3tab[l3off] =
-                pfn_to_paddr(xc_dom_p2m(dom, l2pfn)) | L3_PROT;
-            l2pfn++;
-        }
+            {
+                /* get L2 tab, make L3 entry */
+                l3off = l3_table_offset_x86_64(addr);
+                pgpfn = (addr - map->lvls[1].from) >> 
L3_PAGETABLE_SHIFT_X86_64;
+                l2pfn = l3tab[l3off] ? pgentry_to_pfn(l3tab[l3off]) :
+                        map->lvls[1].pfn + pgpfn;
+                l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
+                if ( l2tab == NULL )
+                    goto pfn_error;
+                l3tab[l3off] = pfn_to_paddr(xc_dom_p2m(dom, l2pfn)) | L3_PROT;
+            }
 
-        if ( l1tab == NULL )
-        {
-            /* get L1 tab, make L2 entry */
-            l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
             if ( l1tab == NULL )
-                goto pfn_error;
-            l2off = l2_table_offset_x86_64(addr);
-            l2tab[l2off] =
-                pfn_to_paddr(xc_dom_p2m(dom, l1pfn)) | L2_PROT;
-            l1pfn++;
-        }
+            {
+                /* get L1 tab, make L2 entry */
+                l2off = l2_table_offset_x86_64(addr);
+                pgpfn = (addr - map->lvls[0].from) >> 
L2_PAGETABLE_SHIFT_X86_64;
+                l1pfn = l2tab[l2off] ? pgentry_to_pfn(l2tab[l2off]) :
+                        map->lvls[0].pfn + pgpfn;
+                l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
+                if ( l1tab == NULL )
+                    goto pfn_error;
+                l2tab[l2off] = pfn_to_paddr(xc_dom_p2m(dom, l1pfn)) | L2_PROT;
+            }
 
-        /* make L1 entry */
-        l1off = l1_table_offset_x86_64(addr);
-        pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
-        l1tab[l1off] =
-            pfn_to_paddr(xc_dom_p2m(dom, pgpfn)) | L1_PROT;
-        if ( (!dom->pvh_enabled)                &&
-             (addr >= dom->pgtables_seg.vstart) &&
-             (addr < dom->pgtables_seg.vend) )
-            l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
-
-        if ( l1off == (L1_PAGETABLE_ENTRIES_X86_64 - 1) )
-        {
-            l1tab = NULL;
-            if ( l2off == (L2_PAGETABLE_ENTRIES_X86_64 - 1) )
+            /* make L1 entry */
+            l1off = l1_table_offset_x86_64(addr);
+            pgpfn = ((addr - map->area.from) >> PAGE_SHIFT_X86) + map->area.pfn;
+            l1tab[l1off] = pfn_to_paddr(xc_dom_p2m(dom, pgpfn)) | L1_PROT;
+            if ( (!dom->pvh_enabled)            &&
+                 (pgpfn >= map->lvls[3].pfn)    &&
+                 (pgpfn < map->lvls[3].pfn + map->area.pgtables) )
+                l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
+
+            if ( l1off == (L1_PAGETABLE_ENTRIES_X86_64 - 1) )
             {
-                l2tab = NULL;
-                if ( l3off == (L3_PAGETABLE_ENTRIES_X86_64 - 1) )
-                    l3tab = NULL;
+                l1tab = NULL;
+                if ( l2off == (L2_PAGETABLE_ENTRIES_X86_64 - 1) )
+                {
+                    l2tab = NULL;
+                    if ( l3off == (L3_PAGETABLE_ENTRIES_X86_64 - 1) )
+                        l3tab = NULL;
+                }
             }
         }
     }
@@ -659,7 +735,7 @@ static int start_info_x86_32(struct xc_dom_image *dom)
     start_info->nr_pages = dom->total_pages;
     start_info->shared_info = shinfo << PAGE_SHIFT_X86;
     start_info->pt_base = dom->pgtables_seg.vstart;
-    start_info->nr_pt_frames = domx86->pgtables;
+    start_info->nr_pt_frames = domx86->maps[0].area.pgtables;
     start_info->mfn_list = dom->p2m_seg.vstart;
 
     start_info->flags = dom->flags;
@@ -706,7 +782,7 @@ static int start_info_x86_64(struct xc_dom_image *dom)
     start_info->nr_pages = dom->total_pages;
     start_info->shared_info = shinfo << PAGE_SHIFT_X86;
     start_info->pt_base = dom->pgtables_seg.vstart;
-    start_info->nr_pt_frames = domx86->pgtables;
+    start_info->nr_pt_frames = domx86->maps[0].area.pgtables;
     start_info->mfn_list = dom->p2m_seg.vstart;
 
     start_info->flags = dom->flags;
-- 
2.1.4

