|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out pv_arch_init_memory"
commit 824785e469f47aa9a8a2f4a6f4757dfedd6ec940
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Sep 25 11:11:05 2017 +0100
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Oct 18 12:40:40 2017 +0100
Revert "x86/mm: move PV l4 table setup code" and "x86/mm: factor out
pv_arch_init_memory"
This reverts commit f3b95fd07fdb55b1db091fede1b9a7c71f1eaa1b and
1bd39738a5a34f529a610fb275cc83ee5ac7547a.
The following patches (post XSA-243 fixes) require init_guest_l4_table()
being common code.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Release-acked-by: Julien Grall <julien.grall@xxxxxxxxxx>
---
xen/arch/x86/mm.c | 77 +++++++++++++++++++++++++++++++++++++++--
xen/arch/x86/pv/dom0_build.c | 2 --
xen/arch/x86/pv/domain.c | 5 ---
xen/arch/x86/pv/mm.c | 82 --------------------------------------------
xen/arch/x86/pv/mm.h | 3 --
xen/include/asm-x86/mm.h | 2 ++
xen/include/asm-x86/pv/mm.h | 4 ---
7 files changed, 77 insertions(+), 98 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 62d313e..0a89a9c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -125,7 +125,6 @@
#include <asm/hvm/grant_table.h>
#include <asm/pv/grant_table.h>
-#include <asm/pv/mm.h>
#include "pv/mm.h"
@@ -243,6 +242,14 @@ void __init init_frametable(void)
memset(end_pg, -1, (unsigned long)top_pg - (unsigned long)end_pg);
}
+#ifndef NDEBUG
+static unsigned int __read_mostly root_pgt_pv_xen_slots
+ = ROOT_PAGETABLE_PV_XEN_SLOTS;
+static l4_pgentry_t __read_mostly split_l4e;
+#else
+#define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS
+#endif
+
void __init arch_init_memory(void)
{
unsigned long i, pfn, rstart_pfn, rend_pfn, iostart_pfn, ioend_pfn;
@@ -338,7 +345,39 @@ void __init arch_init_memory(void)
mem_sharing_init();
- pv_arch_init_memory();
+#ifndef NDEBUG
+ if ( highmem_start )
+ {
+ unsigned long split_va = (unsigned long)__va(highmem_start);
+
+ if ( split_va < HYPERVISOR_VIRT_END &&
+ split_va - 1 == (unsigned long)__va(highmem_start - 1) )
+ {
+ root_pgt_pv_xen_slots = l4_table_offset(split_va) -
+ ROOT_PAGETABLE_FIRST_XEN_SLOT;
+ ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
+ if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
+ {
+ l3_pgentry_t *l3tab = alloc_xen_pagetable();
+
+ if ( l3tab )
+ {
+ const l3_pgentry_t *l3idle =
+ l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
+
+ for ( i = 0; i < l3_table_offset(split_va); ++i )
+ l3tab[i] = l3idle[i];
+ for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
+ l3tab[i] = l3e_empty();
+ split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
+ __PAGE_HYPERVISOR_RW);
+ }
+ else
+ ++root_pgt_pv_xen_slots;
+ }
+ }
+ }
+#endif
}
int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
@@ -1479,6 +1518,40 @@ static int alloc_l3_table(struct page_info *page)
return rc > 0 ? 0 : rc;
}
+/*
+ * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
+ * values a guest may have left there from alloc_l4_table().
+ */
+void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
+ bool zap_ro_mpt)
+{
+ /* Xen private mappings. */
+ memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+ &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+ root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
+#ifndef NDEBUG
+ if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
+ {
+ l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
+ root_pgt_pv_xen_slots];
+
+ if ( l4e_get_intpte(split_l4e) )
+ *next++ = split_l4e;
+
+ memset(next, 0,
+ _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
+ }
+#else
+ BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
+#endif
+ l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
+ l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
+ l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
+ l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
+ if ( zap_ro_mpt || is_pv_32bit_domain(d) )
+ l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+}
+
bool fill_ro_mpt(mfn_t mfn)
{
l4_pgentry_t *l4tab = map_domain_page(mfn);
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 8ad7e3d..b817153 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -20,8 +20,6 @@
#include <asm/page.h>
#include <asm/setup.h>
-#include "mm.h"
-
/* Allow ring-3 access in long mode as guest cannot use ring 1 ... */
#define BASE_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
#define L1_PROT (BASE_PROT|_PAGE_GUEST_KERNEL)
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 90d5569..c8b9cb6 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -9,13 +9,8 @@
#include <xen/lib.h>
#include <xen/sched.h>
-#include <asm/p2m.h>
-#include <asm/paging.h>
-#include <asm/setup.h>
#include <asm/pv/domain.h>
-#include "mm.h"
-
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
index e45d628..6890e80 100644
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -23,7 +23,6 @@
#include <asm/current.h>
#include <asm/p2m.h>
-#include <asm/setup.h>
#include "mm.h"
@@ -134,87 +133,6 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
return true;
}
-#ifndef NDEBUG
-static unsigned int __read_mostly root_pgt_pv_xen_slots
- = ROOT_PAGETABLE_PV_XEN_SLOTS;
-static l4_pgentry_t __read_mostly split_l4e;
-#else
-#define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS
-#endif
-
-/*
- * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
- * values a guest may have left there from alloc_l4_table().
- */
-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
- bool zap_ro_mpt)
-{
- /* Xen private mappings. */
- memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
-#ifndef NDEBUG
- if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
- {
- l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
- root_pgt_pv_xen_slots];
-
- if ( l4e_get_intpte(split_l4e) )
- *next++ = split_l4e;
-
- memset(next, 0,
- _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
- }
-#else
- BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
-#endif
- l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
- l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
- l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
- if ( zap_ro_mpt || is_pv_32bit_domain(d) )
- l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
-}
-
-void pv_arch_init_memory(void)
-{
-#ifndef NDEBUG
- unsigned int i;
-
- if ( highmem_start )
- {
- unsigned long split_va = (unsigned long)__va(highmem_start);
-
- if ( split_va < HYPERVISOR_VIRT_END &&
- split_va - 1 == (unsigned long)__va(highmem_start - 1) )
- {
- root_pgt_pv_xen_slots = l4_table_offset(split_va) -
- ROOT_PAGETABLE_FIRST_XEN_SLOT;
- ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
- if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
- {
- l3_pgentry_t *l3tab = alloc_xen_pagetable();
-
- if ( l3tab )
- {
- const l3_pgentry_t *l3idle =
- l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
-
- for ( i = 0; i < l3_table_offset(split_va); ++i )
- l3tab[i] = l3idle[i];
- for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
- l3tab[i] = l3e_empty();
- split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
- __PAGE_HYPERVISOR_RW);
- }
- else
- ++root_pgt_pv_xen_slots;
- }
- }
- }
-#endif
-}
-
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/pv/mm.h b/xen/arch/x86/pv/mm.h
index 169c9e0..7502d53 100644
--- a/xen/arch/x86/pv/mm.h
+++ b/xen/arch/x86/pv/mm.h
@@ -3,9 +3,6 @@
l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn);
-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
- bool zap_ro_mpt);
-
int new_guest_cr3(mfn_t mfn);
/* Read a PV guest's l1e that maps this linear address. */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 26f0153..eeac4d7 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -340,6 +340,8 @@ static inline void *__page_to_virt(const struct page_info *pg)
int free_page_type(struct page_info *page, unsigned long type,
int preemptible);
+void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
+ bool_t zap_ro_mpt);
bool fill_ro_mpt(mfn_t mfn);
void zap_ro_mpt(mfn_t mfn);
diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h
index 07785e0..5d2fe4c 100644
--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -30,8 +30,6 @@ void pv_destroy_gdt(struct vcpu *v);
bool pv_map_ldt_shadow_page(unsigned int off);
-void pv_arch_init_memory(void);
-
#else
#include <xen/errno.h>
@@ -51,8 +49,6 @@ static inline void pv_destroy_gdt(struct vcpu *v) {
ASSERT_UNREACHABLE(); }
static inline bool pv_map_ldt_shadow_page(unsigned int off) { return false; }
-static inline void pv_arch_init_memory(void) {}
-
#endif
#endif /* __X86_PV_MM_H__ */
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |