|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v3 11/16] x86/mm: put HVM only code under CONFIG_HVM
Going through the code, HAP, EPT, PoD and ALTP2M depend on HVM code.
Put these components under CONFIG_HVM. This further requires putting
part of the vm event code under CONFIG_HVM.
Altp2m requires a bit more attention because its code is embedded in
generic x86 p2m code.
Also make hap_enabled evaluate to false when !CONFIG_HVM. Make sure it
still evaluates its parameter, to avoid unused variable warnings in its users.
Also sort items in Makefile while at it.
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
xen/arch/x86/mm/Makefile | 11 ++++++-----
xen/arch/x86/mm/mem_access.c | 18 +++++++++++++++++-
xen/arch/x86/mm/mem_sharing.c | 2 ++
xen/arch/x86/mm/p2m.c | 23 ++++++++++++-----------
xen/common/vm_event.c | 2 ++
xen/include/asm-x86/altp2m.h | 13 ++++++++++++-
xen/include/asm-x86/domain.h | 2 +-
xen/include/asm-x86/hvm/domain.h | 4 ++++
xen/include/asm-x86/p2m.h | 7 ++++++-
9 files changed, 62 insertions(+), 20 deletions(-)
diff --git a/xen/arch/x86/mm/Makefile b/xen/arch/x86/mm/Makefile
index 3017119813..9cbb2cfcde 100644
--- a/xen/arch/x86/mm/Makefile
+++ b/xen/arch/x86/mm/Makefile
@@ -1,15 +1,16 @@
subdir-y += shadow
-subdir-y += hap
+subdir-$(CONFIG_HVM) += hap
-obj-y += paging.o
-obj-y += p2m.o p2m-pt.o p2m-ept.o p2m-pod.o
-obj-y += altp2m.o
+obj-$(CONFIG_HVM) += altp2m.o
obj-y += guest_walk_2.o
obj-y += guest_walk_3.o
obj-y += guest_walk_4.o
+obj-y += mem_access.o
obj-y += mem_paging.o
obj-y += mem_sharing.o
-obj-y += mem_access.o
+obj-y += p2m.o p2m-pt.o
+obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
+obj-y += paging.o
guest_walk_%.o: guest_walk.c Makefile
$(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index a8b3e99ec4..84879b7faf 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -246,7 +246,6 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
/* Return whether vCPU pause is required (aka. sync event) */
return (p2ma != p2m_access_n2rwx);
}
-#endif
int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
struct p2m_domain *ap2m, p2m_access_t a,
@@ -288,6 +287,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct
p2m_domain *hp2m,
return ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a,
current->domain != d);
}
+#endif
static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
struct p2m_domain *ap2m, p2m_access_t a,
@@ -295,6 +295,7 @@ static int set_mem_access(struct domain *d, struct
p2m_domain *p2m,
{
int rc = 0;
+#ifdef CONFIG_HVM
if ( ap2m )
{
rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
@@ -303,6 +304,9 @@ static int set_mem_access(struct domain *d, struct
p2m_domain *p2m,
rc = 0;
}
else
+#else
+ ASSERT(!ap2m);
+#endif
{
mfn_t mfn;
p2m_access_t _a;
@@ -364,6 +368,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn,
uint32_t nr,
long rc = 0;
/* altp2m view 0 is treated as the hostp2m */
+#ifdef CONFIG_HVM
if ( altp2m_idx )
{
if ( altp2m_idx >= MAX_ALTP2M ||
@@ -372,6 +377,9 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn,
uint32_t nr,
ap2m = d->arch.altp2m_p2m[altp2m_idx];
}
+#else
+ ASSERT(!altp2m_idx);
+#endif
if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
return -EINVAL;
@@ -419,6 +427,7 @@ long p2m_set_mem_access_multi(struct domain *d,
long rc = 0;
/* altp2m view 0 is treated as the hostp2m */
+#ifdef CONFIG_HVM
if ( altp2m_idx )
{
if ( altp2m_idx >= MAX_ALTP2M ||
@@ -427,6 +436,9 @@ long p2m_set_mem_access_multi(struct domain *d,
ap2m = d->arch.altp2m_p2m[altp2m_idx];
}
+#else
+ ASSERT(!altp2m_idx);
+#endif
p2m_lock(p2m);
if ( ap2m )
@@ -480,12 +492,15 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn,
xenmem_access_t *access)
void arch_p2m_set_access_required(struct domain *d, bool access_required)
{
+#ifdef CONFIG_HVM
unsigned int i;
+#endif
ASSERT(atomic_read(&d->pause_count));
p2m_get_hostp2m(d)->access_required = access_required;
+#ifdef CONFIG_HVM
if ( !altp2m_active(d) )
return;
@@ -496,6 +511,7 @@ void arch_p2m_set_access_required(struct domain *d, bool
access_required)
if ( p2m )
p2m->access_required = access_required;
}
+#endif
}
/*
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index d04f9c79b3..349e6fd2cf 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -802,6 +802,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
if ( !p2m_is_sharable(p2mt) )
goto out;
+#ifdef CONFIG_HVM
/* Check if there are mem_access/remapped altp2m entries for this page */
if ( altp2m_active(d) )
{
@@ -829,6 +830,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
altp2m_list_unlock(d);
}
+#endif
/* Try to convert the mfn to the sharable type */
page = mfn_to_page(mfn);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 7a12cd37e8..027202f39e 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -187,7 +187,6 @@ static int p2m_init_nestedp2m(struct domain *d)
return 0;
}
-#endif
static void p2m_teardown_altp2m(struct domain *d)
{
@@ -226,6 +225,7 @@ static int p2m_init_altp2m(struct domain *d)
return 0;
}
+#endif
int p2m_init(struct domain *d)
{
@@ -245,16 +245,14 @@ int p2m_init(struct domain *d)
p2m_teardown_hostp2m(d);
return rc;
}
-#endif
rc = p2m_init_altp2m(d);
if ( rc )
{
p2m_teardown_hostp2m(d);
-#ifdef CONFIG_HVM
p2m_teardown_nestedp2m(d);
-#endif
}
+#endif
return rc;
}
@@ -700,12 +698,12 @@ void p2m_teardown(struct p2m_domain *p2m)
void p2m_final_teardown(struct domain *d)
{
+#ifdef CONFIG_HVM
/*
* We must teardown both of them unconditionally because
* we initialise them unconditionally.
*/
p2m_teardown_altp2m(d);
-#ifdef CONFIG_HVM
p2m_teardown_nestedp2m(d);
#endif
@@ -1727,12 +1725,6 @@ void p2m_mem_paging_resume(struct domain *d,
vm_event_response_t *rsp)
}
}
-void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
-{
- if ( altp2m_active(v->domain) )
- p2m_switch_vcpu_altp2m_by_id(v, idx);
-}
-
#ifdef CONFIG_HVM
static struct p2m_domain *
p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
@@ -2182,6 +2174,14 @@ int unmap_mmio_regions(struct domain *d,
return i == nr ? 0 : i ?: ret;
}
+#ifdef CONFIG_HVM
+
+void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
+{
+ if ( altp2m_active(v->domain) )
+ p2m_switch_vcpu_altp2m_by_id(v, idx);
+}
+
bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
{
struct domain *d = v->domain;
@@ -2559,6 +2559,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t
gfn,
return ret;
}
+#endif /* CONFIG_HVM */
/*** Audit ***/
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index a3bbfc9474..12293c1588 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -429,9 +429,11 @@ void vm_event_resume(struct domain *d, struct
vm_event_domain *ved)
*/
vm_event_toggle_singlestep(d, v, &rsp);
+#ifdef CONFIG_HVM
/* Check for altp2m switch */
if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
p2m_altp2m_check(v, rsp.altp2m_idx);
+#endif
if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
vm_event_set_registers(v, &rsp);
diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
index 64c761873e..41fdd828a2 100644
--- a/xen/include/asm-x86/altp2m.h
+++ b/xen/include/asm-x86/altp2m.h
@@ -18,12 +18,14 @@
#ifndef __ASM_X86_ALTP2M_H
#define __ASM_X86_ALTP2M_H
+#ifdef CONFIG_HVM
+
#include <xen/types.h>
#include <xen/sched.h> /* for struct vcpu, struct domain */
#include <asm/hvm/vcpu.h> /* for vcpu_altp2m */
/* Alternate p2m HVM on/off per domain */
-static inline bool_t altp2m_active(const struct domain *d)
+static inline bool altp2m_active(const struct domain *d)
{
return d->arch.altp2m_active;
}
@@ -37,5 +39,14 @@ static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
{
return vcpu_altp2m(v).p2midx;
}
+#else
+
+static inline bool altp2m_active(const struct domain *d)
+{
+ return false;
+}
+
+uint16_t altp2m_vcpu_idx(const struct vcpu *v);
+#endif
#endif /* __ASM_X86_ALTP2M_H */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index b46cfb0ce4..cb0721e9d5 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -337,13 +337,13 @@ struct arch_domain
/* nestedhvm: translate l2 guest physical to host physical */
struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
mm_lock_t nested_p2m_lock;
-#endif
/* altp2m: allow multiple copies of host p2m */
bool_t altp2m_active;
struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
mm_lock_t altp2m_list_lock;
uint64_t *altp2m_eptp;
+#endif
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
struct radix_tree_root irq_pirq;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 7388cd895e..6b81f10074 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -195,7 +195,11 @@ struct hvm_domain {
};
};
+#ifdef CONFIG_HVM
#define hap_enabled(d) ((d)->arch.hvm.hap_enabled)
+#else
+#define hap_enabled(d) ({(void)(d); false;})
+#endif
#endif /* __ASM_X86_HVM_DOMAIN_H__ */
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 20cf3f1a25..5b5faf37f0 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -231,8 +231,10 @@ struct p2m_domain {
* host p2m's lock. */
int defer_nested_flush;
+#ifdef CONFIG_HVM
/* Alternate p2m: count of vcpu's currently using this p2m. */
atomic_t active_vcpus;
+#endif
/* Pages used to construct the p2m */
struct page_list_head pages;
@@ -823,7 +825,7 @@ void nestedp2m_write_p2m_entry(struct p2m_domain *p2m,
unsigned long gfn,
/*
* Alternate p2m: shadow p2m tables used for alternate memory views
*/
-
+#ifdef CONFIG_HVM
/* get current alternate p2m table */
static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
{
@@ -870,6 +872,9 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int
idx,
int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
mfn_t mfn, unsigned int page_order,
p2m_type_t p2mt, p2m_access_t p2ma);
+#else
+struct p2m_domain *p2m_get_altp2m(struct vcpu *v);
+#endif
/*
* p2m type to IOMMU flags
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |