
[Xen-devel] [PATCH v2 3/5] x86/mm: allow for building without shadow mode support



Considering the complexity of the code, it seems reasonable to allow
people to disable that code entirely, even beyond the immediate need
for this arising from the next patch.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Set mode table in shadow_vcpu_init() stub. Convert BUG()/BUG_ON()
    to ASSERT()/ASSERT_UNREACHABLE() and make various of the stub
    functions macros or inline. Hide opt_dom0_shadow when
    !CONFIG_SHADOW_PAGING. Adjust Makefile modification.
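
Build usage note (a minimal sketch, assuming the usual in-tree build;
not part of the commit message): with the Rules.mk hook below, shadow
paging support can presumably be compiled out by overriding the new
make variable on the command line, e.g.

    make -C xen shadow-paging=n

The default (y) retains current behaviour, defining CONFIG_SHADOW_PAGING
and building the full shadow code, while "n" has
xen/arch/x86/mm/shadow/none.c provide the stub implementations instead.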

--- a/xen/arch/x86/Rules.mk
+++ b/xen/arch/x86/Rules.mk
@@ -32,9 +32,13 @@ x86 := y
 x86_32 := n
 x86_64 := y
 
+shadow-paging ?= y
+
 CFLAGS += -mno-red-zone -mno-sse -fpic
 CFLAGS += -fno-asynchronous-unwind-tables
 # -fvisibility=hidden reduces -fpic cost, if it's available
 ifneq ($(call cc-option,$(CC),-fvisibility=hidden,n),n)
 CFLAGS += -DGCC_HAS_VISIBILITY_ATTRIBUTE
 endif
+
+CFLAGS-$(shadow-paging) += -DCONFIG_SHADOW_PAGING
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -128,8 +128,10 @@ struct vcpu *__init alloc_dom0_vcpu0(str
     return alloc_vcpu(dom0, 0, 0);
 }
 
+#ifdef CONFIG_SHADOW_PAGING
 static bool_t __initdata opt_dom0_shadow;
 boolean_param("dom0_shadow", opt_dom0_shadow);
+#endif
 
 static char __initdata opt_dom0_ioports_disable[200] = "";
 string_param("dom0_ioports_disable", opt_dom0_ioports_disable);
@@ -1399,6 +1401,7 @@ int __init construct_dom0(
     regs->esi = vstartinfo_start;
     regs->eflags = X86_EFLAGS_IF;
 
+#ifdef CONFIG_SHADOW_PAGING
     if ( opt_dom0_shadow )
     {
         if ( is_pvh_domain(d) )
@@ -1409,6 +1412,7 @@ int __init construct_dom0(
         if ( paging_enable(d, PG_SH_enable) == 0 ) 
             paging_update_paging_modes(v);
     }
+#endif
 
     /*
      * PVH Fixme: XENFEAT_supervisor_mode_kernel has been reused in PVH with a
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -635,16 +635,16 @@ int paging_domain_init(struct domain *d,
      * don't want to leak any active log-dirty bitmaps */
     d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
 
-    /* The order of the *_init calls below is important, as the later
-     * ones may rewrite some common fields.  Shadow pagetables are the
-     * default... */
-    shadow_domain_init(d, domcr_flags);
-
-    /* ... but we will use hardware assistance if it's available. */
+    /*
+     * Shadow pagetables are the default, but we will use
+     * hardware assistance if it's available and enabled.
+     */
     if ( hap_enabled(d) )
         hap_domain_init(d);
+    else
+        rc = shadow_domain_init(d, domcr_flags);
 
-    return 0;
+    return rc;
 }
 
 /* vcpu paging struct initialization goes here */
@@ -822,12 +822,16 @@ int paging_enable(struct domain *d, u32 
  * and therefore its pagetables will soon be discarded */
 void pagetable_dying(struct domain *d, paddr_t gpa)
 {
+#ifdef CONFIG_SHADOW_PAGING
     struct vcpu *v;
 
     ASSERT(paging_mode_shadow(d));
 
     v = d->vcpu[0];
     v->arch.paging.mode->shadow.pagetable_dying(v, gpa);
+#else
+    BUG();
+#endif
 }
 
 /* Print paging-assistance info to the console */
--- a/xen/arch/x86/mm/shadow/Makefile
+++ b/xen/arch/x86/mm/shadow/Makefile
@@ -1,4 +1,8 @@
-obj-$(x86_64) += common.o guest_2.o guest_3.o guest_4.o
+ifeq ($(shadow-paging),y)
+obj-y += common.o guest_2.o guest_3.o guest_4.o
+else
+obj-y += none.o
+endif
 
 guest_%.o: multi.c Makefile
        $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -47,7 +47,7 @@ static void sh_clean_dirty_bitmap(struct
 
 /* Set up the shadow-specific parts of a domain struct at start of day.
  * Called for every domain from arch_domain_create() */
-void shadow_domain_init(struct domain *d, unsigned int domcr_flags)
+int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
 {
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
@@ -61,6 +61,8 @@ void shadow_domain_init(struct domain *d
     d->arch.paging.shadow.oos_off = (domcr_flags & DOMCRF_oos_off) ?  1 : 0;
 #endif
     d->arch.paging.shadow.pagetable_dying_op = 0;
+
+    return 0;
 }
 
 /* Setup the shadow-specfic parts of a vcpu struct. Note: The most important
--- /dev/null
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -0,0 +1,78 @@
+#include <xen/mm.h>
+#include <asm/shadow.h>
+
+static int _enable_log_dirty(struct domain *d, bool_t log_global)
+{
+    ASSERT(is_pv_domain(d));
+    return -EOPNOTSUPP;
+}
+
+static int _disable_log_dirty(struct domain *d)
+{
+    ASSERT(is_pv_domain(d));
+    return -EOPNOTSUPP;
+}
+
+static void _clean_dirty_bitmap(struct domain *d)
+{
+    ASSERT(is_pv_domain(d));
+}
+
+int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
+{
+    paging_log_dirty_init(d, _enable_log_dirty,
+                          _disable_log_dirty, _clean_dirty_bitmap);
+    return is_pv_domain(d) ? 0 : -EOPNOTSUPP;
+}
+
+static int _page_fault(struct vcpu *v, unsigned long va,
+                       struct cpu_user_regs *regs)
+{
+    ASSERT_UNREACHABLE();
+    return 0;
+}
+
+static int _invlpg(struct vcpu *v, unsigned long va)
+{
+    ASSERT_UNREACHABLE();
+    return -EOPNOTSUPP;
+}
+
+static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
+                                 unsigned long va, uint32_t *pfec)
+{
+    ASSERT_UNREACHABLE();
+    return INVALID_GFN;
+}
+
+static void _update_cr3(struct vcpu *v, int do_locking)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static void _update_paging_modes(struct vcpu *v)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static void _write_p2m_entry(struct domain *d, unsigned long gfn,
+                             l1_pgentry_t *p, l1_pgentry_t new,
+                             unsigned int level)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static const struct paging_mode sh_paging_none = {
+    .page_fault                    = _page_fault,
+    .invlpg                        = _invlpg,
+    .gva_to_gfn                    = _gva_to_gfn,
+    .update_cr3                    = _update_cr3,
+    .update_paging_modes           = _update_paging_modes,
+    .write_p2m_entry               = _write_p2m_entry,
+};
+
+void shadow_vcpu_init(struct vcpu *v)
+{
+    ASSERT(is_pv_domain(v->domain));
+    v->arch.paging.mode = &sh_paging_none;
+}
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -87,6 +87,7 @@ void hypercall_page_initialise(struct do
 /*          shadow paging extension             */
 /************************************************/
 struct shadow_domain {
+#ifdef CONFIG_SHADOW_PAGING
     unsigned int      opt_flags;    /* runtime tunable optimizations on/off */
     struct page_list_head pinned_shadows;
 
@@ -116,9 +117,11 @@ struct shadow_domain {
 
     /* Has this domain ever used HVMOP_pagetable_dying? */
     bool_t pagetable_dying_op;
+#endif
 };
 
 struct shadow_vcpu {
+#ifdef CONFIG_SHADOW_PAGING
     /* PAE guests: per-vcpu shadow top-level table */
     l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
     /* PAE guests: per-vcpu cache of the top-level *guest* entries */
@@ -144,6 +147,7 @@ struct shadow_vcpu {
     } oos_fixup[SHADOW_OOS_PAGES];
 
     bool_t pagetable_dying;
+#endif
 };
 
 /************************************************/
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -39,7 +39,11 @@
 #define PG_SH_shift    20
 #define PG_HAP_shift   21
 /* We're in one of the shadow modes */
+#ifdef CONFIG_SHADOW_PAGING
 #define PG_SH_enable   (1U << PG_SH_shift)
+#else
+#define PG_SH_enable   0
+#endif
 #define PG_HAP_enable  (1U << PG_HAP_shift)
 
 /* common paging mode bits */
@@ -74,6 +78,7 @@
 
 struct sh_emulate_ctxt;
 struct shadow_paging_mode {
+#ifdef CONFIG_SHADOW_PAGING
     void          (*detach_old_tables     )(struct vcpu *v);
     int           (*x86_emulate_write     )(struct vcpu *v, unsigned long va,
                                             void *src, u32 bytes,
@@ -88,6 +93,7 @@ struct shadow_paging_mode {
     int           (*guess_wrmap           )(struct vcpu *v, 
                                             unsigned long vaddr, mfn_t gmfn);
     void          (*pagetable_dying       )(struct vcpu *v, paddr_t gpa);
+#endif
     /* For outsiders to tell what mode we're in */
     unsigned int shadow_levels;
 };
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -49,12 +49,14 @@
 
 /* Set up the shadow-specific parts of a domain struct at start of day.
  * Called from paging_domain_init(). */
-void shadow_domain_init(struct domain *d, unsigned int domcr_flags);
+int shadow_domain_init(struct domain *d, unsigned int domcr_flags);
 
 /* Setup the shadow-specific parts of a vcpu struct. It is called by
  * paging_vcpu_init() in paging.c */
 void shadow_vcpu_init(struct vcpu *v);
 
+#ifdef CONFIG_SHADOW_PAGING
+
 /* Enable an arbitrary shadow mode.  Call once at domain creation. */
 int shadow_enable(struct domain *d, u32 mode);
 
@@ -77,17 +79,40 @@ void shadow_teardown(struct domain *d);
 /* Call once all of the references to the domain have gone away */
 void shadow_final_teardown(struct domain *d);
 
-/* Remove all shadows of the guest mfn. */
 void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all);
+
+/* Discard _all_ mappings from the domain's shadows. */
+void shadow_blow_tables_per_domain(struct domain *d);
+
+#else /* !CONFIG_SHADOW_PAGING */
+
+#define shadow_teardown(d) ASSERT(is_pv_domain(d))
+#define shadow_final_teardown(d) ASSERT(is_pv_domain(d))
+#define shadow_enable(d, mode) \
+    ({ ASSERT(is_pv_domain(d)); -EOPNOTSUPP; })
+#define shadow_track_dirty_vram(d, begin_pfn, nr, bitmap) \
+    ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
+
+static inline void sh_remove_shadows(struct vcpu *v, mfn_t gmfn,
+                                     bool_t fast, bool_t all) {}
+
+static inline void shadow_blow_tables_per_domain(struct domain *d) {}
+
+static inline int shadow_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
+                                XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+{
+    return -EINVAL;
+}
+
+#endif /* CONFIG_SHADOW_PAGING */
+
+/* Remove all shadows of the guest mfn. */
 static inline void shadow_remove_all_shadows(struct vcpu *v, mfn_t gmfn)
 {
     /* See the comment about locking in sh_remove_shadows */
     sh_remove_shadows(v, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
 }
 
-/* Discard _all_ mappings from the domain's shadows. */
-void shadow_blow_tables_per_domain(struct domain *d);
-
 #endif /* _XEN_SHADOW_H */
 
 /*
--- a/xen/include/xen/paging.h
+++ b/xen/include/xen/paging.h
@@ -7,7 +7,7 @@
 #include <asm/paging.h>
 #include <asm/p2m.h>
 
-#elif defined CONFIG_SHADOW
+#elif defined CONFIG_SHADOW_PAGING
 
 #include <asm/shadow.h>
 


