[PATCH v1 04/25] xen: consolidate CONFIG_VM_EVENT
The files hvm/vm_event.c and x86/vm_event.c extend the vm_event handling
routines, so their compilation shall be guarded by CONFIG_VM_EVENT too.
Furthermore, the monitor_op and memory access features are both built on
the vm_event subsystem, so monitor.o/mem_access.o shall be wrapped under
CONFIG_VM_EVENT as well.
Although CONFIG_VM_EVENT is forcibly enabled on x86 for now, in the future
it could be disabled by disabling CONFIG_DOMCTL.
As a consequence, a few functions, such as the ones declared in
hvm/monitor.h, need stubs to pass compilation when CONFIG_VM_EVENT=n.
Remove the CONFIG_VM_EVENT wrapper around "#include <asm/mem_access.h>", as
the declarations there are needed to pass compilation when CONFIG_VM_EVENT=n.
The following functions are built on the vm_event framework, or are only
invoked from vm_event.c/monitor.c/mem_access.c, so they shall all be
wrapped with CONFIG_VM_EVENT:
- hvm_toggle_singlestep
- hvm_fast_singlestep
- p2m_mem_paging_drop_page
- p2m_mem_paging_populate_page
- p2m_mem_paging_resume
- hvm_enable_msr_interception
- hvm_function_table.enable_msr_interception
- hvm_has_set_descriptor_access_exiting
- hvm_function_table.set_descriptor_access_exiting
- xsm_vm_event_control
Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
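A note on the stub pattern used throughout (illustration only, not part of
the patch): when CONFIG_VM_EVENT=n the objects providing the real
definitions are no longer built, so the headers supply inline no-ops in
their place and call sites stay unconditional. A minimal sketch, with
vm_event_do_op() as a made-up name:

    /* Illustrative only -- vm_event_do_op() is hypothetical. */
    #ifdef CONFIG_VM_EVENT
    int vm_event_do_op(struct domain *d);       /* real implementation */
    #else
    static inline int vm_event_do_op(struct domain *d)
    {
        return -EOPNOTSUPP;                     /* subsystem compiled out */
    }
    #endif /* CONFIG_VM_EVENT */

Void helpers become empty inline functions instead, as in the hunks below.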
xen/arch/ppc/stubs.c | 2 +
xen/arch/x86/Makefile | 2 +-
xen/arch/x86/hvm/Makefile | 4 +-
xen/arch/x86/hvm/hvm.c | 2 +
xen/arch/x86/hvm/svm/svm.c | 8 +++
xen/arch/x86/hvm/vmx/vmx.c | 10 ++++
xen/arch/x86/include/asm/hvm/hvm.h | 10 ++++
xen/arch/x86/include/asm/hvm/monitor.h | 65 ++++++++++++++++++++++++-
xen/arch/x86/include/asm/hvm/vm_event.h | 4 ++
xen/arch/x86/include/asm/mem_access.h | 9 ++++
xen/arch/x86/include/asm/monitor.h | 15 ++++++
xen/arch/x86/include/asm/p2m.h | 6 +++
xen/arch/x86/mm/mem_paging.c | 2 +
xen/include/xen/mem_access.h | 36 ++++++++++++--
xen/include/xen/monitor.h | 8 ++-
xen/include/xen/vm_event.h | 24 ++++++++-
xen/include/xsm/xsm.h | 4 +-
xen/xsm/dummy.c | 2 +-
xen/xsm/flask/hooks.c | 4 +-
19 files changed, 200 insertions(+), 17 deletions(-)
diff --git a/xen/arch/ppc/stubs.c b/xen/arch/ppc/stubs.c
index 671e71aa0a..4679cf9360 100644
--- a/xen/arch/ppc/stubs.c
+++ b/xen/arch/ppc/stubs.c
@@ -60,6 +60,7 @@ void vcpu_show_execution_state(struct vcpu *v)
BUG_ON("unimplemented");
}
+#ifdef CONFIG_VM_EVENT
/* vm_event.c */
void vm_event_fill_regs(vm_event_request_t *req)
@@ -76,6 +77,7 @@ void vm_event_monitor_next_interrupt(struct vcpu *v)
{
/* Not supported on PPC. */
}
+#endif /* CONFIG_VM_EVENT */
/* domctl.c */
void arch_get_domain_info(const struct domain *d,
diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 7676d7cdd8..17f0e1dbc9 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -76,7 +76,7 @@ obj-y += usercopy.o
obj-y += x86_emulate.o
obj-$(CONFIG_TBOOT) += tboot.o
obj-y += hpet.o
-obj-y += vm_event.o
+obj-$(CONFIG_VM_EVENT) += vm_event.o
obj-y += xstate.o
ifneq ($(CONFIG_PV_SHIM_EXCLUSIVE),y)
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index 6ec2c8f2db..952db00dd7 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -16,7 +16,7 @@ obj-y += io.o
obj-y += ioreq.o
obj-y += irq.o
obj-y += mmio.o
-obj-y += monitor.o
+obj-$(CONFIG_VM_EVENT) += monitor.o
obj-y += mtrr.o
obj-y += nestedhvm.o
obj-y += pmtimer.o
@@ -26,7 +26,7 @@ obj-y += save.o
obj-y += stdvga.o
obj-y += vioapic.o
obj-y += vlapic.o
-obj-y += vm_event.o
+obj-$(CONFIG_VM_EVENT) += vm_event.o
obj-y += vmsi.o
obj-y += vpic.o
obj-y += vpt.o
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ea97815deb..f464f401aa 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5251,6 +5251,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
return rc;
}
+#ifdef CONFIG_VM_EVENT
void hvm_toggle_singlestep(struct vcpu *v)
{
ASSERT(atomic_read(&v->pause_count));
@@ -5275,6 +5276,7 @@ void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
v->arch.hvm.fast_single_step.enabled = true;
v->arch.hvm.fast_single_step.p2midx = p2midx;
}
+#endif /* CONFIG_VM_EVENT */
/*
* Segment caches in VMCB/VMCS are inconsistent about which bits are checked,
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index fce750ca1f..d71f499986 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -297,6 +297,7 @@ void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
__clear_bit(msr * 2 + 1, msr_bit);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
{
struct vcpu *v;
@@ -304,6 +305,7 @@ static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
for_each_vcpu ( d, v )
svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);
}
+#endif /* CONFIG_VM_EVENT */
static void svm_save_dr(struct vcpu *v)
{
@@ -824,6 +826,7 @@ static void cf_check svm_set_rdtsc_exiting(struct vcpu *v, bool enable)
vmcb_set_general2_intercepts(vmcb, general2_intercepts);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check svm_set_descriptor_access_exiting(
struct vcpu *v, bool enable)
{
@@ -841,6 +844,7 @@ static void cf_check svm_set_descriptor_access_exiting(
vmcb_set_general1_intercepts(vmcb, general1_intercepts);
}
+#endif /* CONFIG_VM_EVENT */
static unsigned int cf_check svm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
{
@@ -2455,9 +2459,13 @@ static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
.fpu_dirty_intercept = svm_fpu_dirty_intercept,
.msr_read_intercept = svm_msr_read_intercept,
.msr_write_intercept = svm_msr_write_intercept,
+#ifdef CONFIG_VM_EVENT
.enable_msr_interception = svm_enable_msr_interception,
+#endif
.set_rdtsc_exiting = svm_set_rdtsc_exiting,
+#ifdef CONFIG_VM_EVENT
.set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
+#endif
.get_insn_bytes = svm_get_insn_bytes,
.nhvm_vcpu_initialise = nsvm_vcpu_initialise,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index cb82d52ef0..9e4ad864fa 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1515,6 +1515,7 @@ static void cf_check vmx_set_rdtsc_exiting(struct vcpu *v, bool enable)
vmx_vmcs_exit(v);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check vmx_set_descriptor_access_exiting(
struct vcpu *v, bool enable)
{
@@ -1529,6 +1530,7 @@ static void cf_check vmx_set_descriptor_access_exiting(
vmx_update_secondary_exec_control(v);
vmx_vmcs_exit(v);
}
+#endif /* CONFIG_VM_EVENT */
static void cf_check vmx_init_hypercall_page(void *p)
{
@@ -2408,6 +2410,7 @@ static void cf_check vmx_handle_eoi(uint8_t vector, int isr)
printk_once(XENLOG_WARNING "EOI for %02x but SVI=%02x\n", vector,
old_svi);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
{
struct vcpu *v;
@@ -2415,6 +2418,7 @@ static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
for_each_vcpu ( d, v )
vmx_set_msr_intercept(v, msr, VMX_MSR_W);
}
+#endif /* CONFIG_VM_EVENT */
static void cf_check vmx_vcpu_update_eptp(struct vcpu *v)
{
@@ -2862,7 +2866,9 @@ static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
.nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
.update_vlapic_mode = vmx_vlapic_msr_changed,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
+#ifdef CONFIG_VM_EVENT
.enable_msr_interception = vmx_enable_msr_interception,
+#endif
.altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
.altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
.altp2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve,
@@ -3068,9 +3074,11 @@ const struct hvm_function_table * __init start_vmx(void)
vmx_function_table.caps.singlestep = cpu_has_monitor_trap_flag;
+#ifdef CONFIG_VM_EVENT
if ( cpu_has_vmx_dt_exiting )
vmx_function_table.set_descriptor_access_exiting =
vmx_set_descriptor_access_exiting;
+#endif
/*
* Do not enable EPT when (!cpu_has_vmx_pat), to prevent security hole
@@ -3141,8 +3149,10 @@ void __init vmx_fill_funcs(void)
if ( !cpu_has_xen_ibt )
return;
+#ifdef CONFIG_VM_EVENT
vmx_function_table.set_descriptor_access_exiting =
vmx_set_descriptor_access_exiting;
+#endif
vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
vmx_function_table.process_isr = vmx_process_isr;
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index bf8bc2e100..bd53fcf155 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -191,7 +191,9 @@ struct hvm_function_table {
void (*handle_cd)(struct vcpu *v, unsigned long value);
void (*set_info_guest)(struct vcpu *v);
void (*set_rdtsc_exiting)(struct vcpu *v, bool enable);
+#ifdef CONFIG_VM_EVENT
void (*set_descriptor_access_exiting)(struct vcpu *v, bool enable);
+#endif
/* Nested HVM */
int (*nhvm_vcpu_initialise)(struct vcpu *v);
@@ -223,7 +225,9 @@ struct hvm_function_table {
paddr_t *L1_gpa, unsigned int *page_order,
uint8_t *p2m_acc, struct npfec npfec);
+#ifdef CONFIG_VM_EVENT
void (*enable_msr_interception)(struct domain *d, uint32_t msr);
+#endif
/* Alternate p2m */
void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
@@ -430,10 +434,12 @@ static inline bool using_svm(void)
#define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA))
+#ifdef CONFIG_VM_EVENT
static inline bool hvm_has_set_descriptor_access_exiting(void)
{
return hvm_funcs.set_descriptor_access_exiting;
}
+#endif /* CONFIG_VM_EVENT */
static inline void hvm_domain_creation_finished(struct domain *d)
{
@@ -680,10 +686,12 @@ static inline int nhvm_hap_walk_L1_p2m(
v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
}
+#ifdef CONFIG_VM_EVENT
static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
}
+#endif /* CONFIG_VM_EVENT */
static inline bool hvm_is_singlestep_supported(void)
{
@@ -923,10 +931,12 @@ static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
ASSERT_UNREACHABLE();
}
+#ifdef CONFIG_VM_EVENT
static inline bool hvm_has_set_descriptor_access_exiting(void)
{
return false;
}
+#endif /* CONFIG_VM_EVENT */
static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset)
{
diff --git a/xen/arch/x86/include/asm/hvm/monitor.h b/xen/arch/x86/include/asm/hvm/monitor.h
index 02021be47b..561ca2e585 100644
--- a/xen/arch/x86/include/asm/hvm/monitor.h
+++ b/xen/arch/x86/include/asm/hvm/monitor.h
@@ -17,14 +17,16 @@ enum hvm_monitor_debug_type
HVM_MONITOR_DEBUG_EXCEPTION,
};
+#define hvm_monitor_crX(cr, new, old) \
+ hvm_monitor_cr(VM_EVENT_X86_##cr, new, old)
+
+#ifdef CONFIG_VM_EVENT
/*
* Called for current VCPU on crX/MSR changes by guest. Bool return signals
* whether emulation should be postponed.
*/
bool hvm_monitor_cr(unsigned int index, unsigned long value,
unsigned long old);
-#define hvm_monitor_crX(cr, new, old) \
- hvm_monitor_cr(VM_EVENT_X86_##cr, new, old)
bool hvm_monitor_msr(unsigned int msr, uint64_t new_value, uint64_t old_value);
void hvm_monitor_descriptor_access(uint64_t exit_info,
uint64_t vmx_exit_qualification,
@@ -45,6 +47,65 @@ int hvm_monitor_vmexit(unsigned long exit_reason,
int hvm_monitor_io(unsigned int port, unsigned int bytes,
bool in, bool str);
+#else
+static inline bool hvm_monitor_cr(unsigned int index, unsigned long value,
+ unsigned long old)
+{
+ return false;
+}
+
+static inline bool hvm_monitor_msr(unsigned int msr, uint64_t new_value,
+ uint64_t old_value)
+{
+ return false;
+}
+
+static inline void hvm_monitor_descriptor_access(uint64_t exit_info,
+ uint64_t vmx_exit_qualification,
+ uint8_t descriptor, bool is_write) {}
+
+static inline int hvm_monitor_debug(unsigned long rip,
+ enum hvm_monitor_debug_type type,
+ unsigned int trap_type,
+ unsigned int insn_length,
+ unsigned int pending_dbg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hvm_monitor_cpuid(unsigned long insn_length,
+ unsigned int leaf, unsigned int subleaf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void hvm_monitor_interrupt(unsigned int vector,
+ unsigned int type,
+ unsigned int err, uint64_t cr2) {}
+
+static inline bool hvm_monitor_emul_unimplemented(void)
+{
+ return false;
+}
+
+static inline bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn,
+ uint32_t pfec, uint16_t kind)
+{
+ return false;
+}
+
+static inline int hvm_monitor_vmexit(unsigned long exit_reason,
+ unsigned long exit_qualification)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hvm_monitor_io(unsigned int port, unsigned int bytes,
+ bool in, bool str)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_VM_EVENT */
#endif /* __ASM_X86_HVM_MONITOR_H__ */
diff --git a/xen/arch/x86/include/asm/hvm/vm_event.h b/xen/arch/x86/include/asm/hvm/vm_event.h
index 506a85c774..1628230182 100644
--- a/xen/arch/x86/include/asm/hvm/vm_event.h
+++ b/xen/arch/x86/include/asm/hvm/vm_event.h
@@ -8,7 +8,11 @@
#ifndef __ASM_X86_HVM_VM_EVENT_H__
#define __ASM_X86_HVM_VM_EVENT_H__
+#ifdef CONFIG_VM_EVENT
void hvm_vm_event_do_resume(struct vcpu *v);
+#else
+static inline void hvm_vm_event_do_resume(struct vcpu *v) {}
+#endif /* CONFIG_VM_EVENT */
#endif /* __ASM_X86_HVM_VM_EVENT_H__ */
diff --git a/xen/arch/x86/include/asm/mem_access.h b/xen/arch/x86/include/asm/mem_access.h
index 1a52a10322..c786116310 100644
--- a/xen/arch/x86/include/asm/mem_access.h
+++ b/xen/arch/x86/include/asm/mem_access.h
@@ -14,6 +14,7 @@
#ifndef __ASM_X86_MEM_ACCESS_H__
#define __ASM_X86_MEM_ACCESS_H__
+#ifdef CONFIG_VM_EVENT
/*
* Setup vm_event request based on the access (gla is -1ull if not available).
* Handles the rw2rx conversion. Boolean return value indicates if event type
@@ -25,6 +26,14 @@
bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
struct npfec npfec,
struct vm_event_st **req_ptr);
+#else
+static inline bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+ struct npfec npfec,
+ struct vm_event_st **req_ptr)
+{
+ return false;
+}
+#endif /* CONFIG_VM_EVENT */
/* Check for emulation and mark vcpu for skipping one instruction
* upon rescheduling if required. */
diff --git a/xen/arch/x86/include/asm/monitor.h b/xen/arch/x86/include/asm/monitor.h
index 3c64d8258f..815de8fda2 100644
--- a/xen/arch/x86/include/asm/monitor.h
+++ b/xen/arch/x86/include/asm/monitor.h
@@ -71,6 +71,7 @@ int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
return rc;
}
+#ifdef CONFIG_VM_EVENT
static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
{
uint32_t capabilities = 0;
@@ -102,6 +103,13 @@ static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
return capabilities;
}
+#else
+static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
+{
+ ASSERT_UNREACHABLE();
+ return 0;
+}
+#endif /* CONFIG_VM_EVENT */
int arch_monitor_domctl_event(struct domain *d,
struct xen_domctl_monitor_op *mop);
@@ -123,7 +131,14 @@ static inline void arch_monitor_cleanup_domain(struct domain *d) {}
#endif
+#ifdef CONFIG_VM_EVENT
bool monitored_msr(const struct domain *d, u32 msr);
+#else
+static inline bool monitored_msr(const struct domain *d, u32 msr)
+{
+ return false;
+}
+#endif
bool monitored_msr_onchangeonly(const struct domain *d, u32 msr);
#endif /* __ASM_X86_MONITOR_H__ */
diff --git a/xen/arch/x86/include/asm/p2m.h b/xen/arch/x86/include/asm/p2m.h
index 58b56e575e..9c92f67443 100644
--- a/xen/arch/x86/include/asm/p2m.h
+++ b/xen/arch/x86/include/asm/p2m.h
@@ -775,10 +775,16 @@ static inline int relinquish_p2m_mapping(struct domain *d)
/* Modify p2m table for shared gfn */
int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
+#ifdef CONFIG_VM_EVENT
/* Tell xenpaging to drop a paged out frame */
void p2m_mem_paging_drop_page(struct domain *d, gfn_t gfn, p2m_type_t p2mt);
/* Start populating a paged out frame */
void p2m_mem_paging_populate(struct domain *d, gfn_t gfn);
+#else
+static inline void p2m_mem_paging_drop_page(struct domain *d, gfn_t gfn,
+ p2m_type_t p2mt) {}
+static inline void p2m_mem_paging_populate(struct domain *d, gfn_t gfn) {}
+#endif /* CONFIG_VM_EVENT */
/* Resume normal operation (in case a domain was paused) */
struct vm_event_st;
void p2m_mem_paging_resume(struct domain *d, struct vm_event_st *rsp);
diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index ac8d34ffa0..636c0533b3 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -15,6 +15,7 @@
#include "mm-locks.h"
+#ifdef CONFIG_VM_EVENT
/*
* p2m_mem_paging_drop_page - Tell pager to drop its reference to a paged page
* @d: guest domain
@@ -186,6 +187,7 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
gfn_unlock(p2m, gfn, 0);
}
}
+#endif /* CONFIG_VM_EVENT */
/*
* nominate - Mark a guest page as to-be-paged-out
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index 4de651038d..e9aaa7b321 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -33,9 +33,7 @@
*/
struct vm_event_st;
-#ifdef CONFIG_VM_EVENT
#include <asm/mem_access.h>
-#endif
/*
* Additional access types, which are used to further restrict
@@ -73,6 +71,7 @@ typedef enum {
/* NOTE: Assumed to be only 4 bits right now on x86. */
} p2m_access_t;
+#ifdef CONFIG_VM_EVENT
struct p2m_domain;
bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
xenmem_access_t xaccess,
@@ -99,10 +98,41 @@ long p2m_set_mem_access_multi(struct domain *d,
int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
unsigned int altp2m_idx);
-#ifdef CONFIG_VM_EVENT
int mem_access_memop(unsigned long cmd,
XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
#else
+struct p2m_domain;
+static inline bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
+ xenmem_access_t xaccess,
+ p2m_access_t *paccess)
+{
+ return false;
+}
+
+static inline long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
+ uint32_t start, uint32_t mask,
+ xenmem_access_t access,
+ unsigned int altp2m_idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline long p2m_set_mem_access_multi(struct domain *d,
+ const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+ const XEN_GUEST_HANDLE(const_uint8) access_list,
+ uint32_t nr, uint32_t start, uint32_t mask,
+ unsigned int altp2m_idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int p2m_get_mem_access(struct domain *d, gfn_t gfn,
+ xenmem_access_t *access,
+ unsigned int altp2m_idx)
+{
+ return -EOPNOTSUPP;
+}
+
static inline
int mem_access_memop(unsigned long cmd,
XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
diff --git a/xen/include/xen/monitor.h b/xen/include/xen/monitor.h
index c086c4390c..1b7984909e 100644
--- a/xen/include/xen/monitor.h
+++ b/xen/include/xen/monitor.h
@@ -30,6 +30,7 @@ struct xen_domctl_monitor_op;
#ifdef CONFIG_VM_EVENT
int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop);
void monitor_guest_request(void);
+int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req);
#else /* !CONFIG_VM_EVENT */
static inline int monitor_domctl(struct domain *d,
struct xen_domctl_monitor_op *mop)
@@ -37,8 +38,11 @@ static inline int monitor_domctl(struct domain *d,
return -EOPNOTSUPP;
}
static inline void monitor_guest_request(void) {}
+static inline int monitor_traps(struct vcpu *v, bool sync,
+ vm_event_request_t *req)
+{
+ return -EOPNOTSUPP;
+}
#endif /* !CONFIG_VM_EVENT */
-int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req);
-
#endif /* __XEN_MONITOR_H__ */
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 27d0c74216..4b3d0d15ec 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -50,6 +50,7 @@ struct vm_event_domain
unsigned int last_vcpu_wake_up;
};
+#ifdef CONFIG_VM_EVENT
/* Returns whether a ring has been set up */
bool vm_event_check_ring(struct vm_event_domain *ved);
@@ -68,6 +69,20 @@ bool vm_event_check_ring(struct vm_event_domain *ved);
*/
int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
bool allow_sleep);
+#else
+static inline bool vm_event_check_ring(struct vm_event_domain *ved)
+{
+ return false;
+}
+
+static inline int __vm_event_claim_slot(struct domain *d,
+ struct vm_event_domain *ved,
+ bool allow_sleep)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_VM_EVENT */
+
static inline int vm_event_claim_slot(struct domain *d,
struct vm_event_domain *ved)
{
@@ -82,23 +97,28 @@ static inline int vm_event_claim_slot_nosleep(struct domain *d,
void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved);
+#ifdef CONFIG_VM_EVENT
void vm_event_put_request(struct domain *d, struct vm_event_domain *ved,
vm_event_request_t *req);
-#ifdef CONFIG_VM_EVENT
/* Clean up on domain destruction */
void vm_event_cleanup(struct domain *d);
int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec);
+
+void vm_event_vcpu_pause(struct vcpu *v);
#else /* !CONFIG_VM_EVENT */
+static inline void vm_event_put_request(struct domain *d,
+ struct vm_event_domain *ved,
+ vm_event_request_t *req) {}
static inline void vm_event_cleanup(struct domain *d) {}
static inline int vm_event_domctl(struct domain *d,
struct xen_domctl_vm_event_op *vec)
{
return -EOPNOTSUPP;
}
+static inline void vm_event_vcpu_pause(struct vcpu *v) {}
#endif /* !CONFIG_VM_EVENT */
-void vm_event_vcpu_pause(struct vcpu *v);
void vm_event_vcpu_unpause(struct vcpu *v);
void vm_event_fill_regs(vm_event_request_t *req);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index dad69905c9..7c61f27366 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -155,9 +155,9 @@ struct xsm_ops {
int (*hvm_altp2mhvm_op)(struct domain *d, uint64_t mode, uint32_t op);
int (*get_vnumainfo)(struct domain *d);
+#ifdef CONFIG_VM_EVENT
int (*vm_event_control)(struct domain *d, int mode, int op);
-#ifdef CONFIG_VM_EVENT
int (*mem_access)(struct domain *d);
#endif
@@ -651,13 +651,13 @@ static inline int xsm_get_vnumainfo(xsm_default_t def, struct domain *d)
return alternative_call(xsm_ops.get_vnumainfo, d);
}
+#ifdef CONFIG_VM_EVENT
static inline int xsm_vm_event_control(
xsm_default_t def, struct domain *d, int mode, int op)
{
return alternative_call(xsm_ops.vm_event_control, d, mode, op);
}
-#ifdef CONFIG_VM_EVENT
static inline int xsm_mem_access(xsm_default_t def, struct domain *d)
{
return alternative_call(xsm_ops.mem_access, d);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 8a893c6eee..6f8b06b45f 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -115,9 +115,9 @@ static const struct xsm_ops __initconst_cf_clobber dummy_ops = {
.remove_from_physmap = xsm_remove_from_physmap,
.map_gmfn_foreign = xsm_map_gmfn_foreign,
+#ifdef CONFIG_VM_EVENT
.vm_event_control = xsm_vm_event_control,
-#ifdef CONFIG_VM_EVENT
.mem_access = xsm_mem_access,
#endif
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 0382182f94..fd7aea460f 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1365,12 +1365,12 @@ static int cf_check flask_hvm_altp2mhvm_op(struct domain *d, uint64_t mode, uint
return current_has_perm(d, SECCLASS_HVM, HVM__ALTP2MHVM_OP);
}
+#ifdef CONFIG_VM_EVENT
static int cf_check flask_vm_event_control(struct domain *d, int mode, int op)
{
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__VM_EVENT);
}
-#ifdef CONFIG_VM_EVENT
static int cf_check flask_mem_access(struct domain *d)
{
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_ACCESS);
@@ -1969,9 +1969,9 @@ static const struct xsm_ops __initconst_cf_clobber flask_ops = {
.do_xsm_op = do_flask_op,
.get_vnumainfo = flask_get_vnumainfo,
+#ifdef CONFIG_VM_EVENT
.vm_event_control = flask_vm_event_control,
-#ifdef CONFIG_VM_EVENT
.mem_access = flask_mem_access,
#endif
--
2.34.1