Re: [PATCH v1] x86: make Viridian support optional
Hi,
I'm surprised this isn't already possible. Neat!

Add config option HVM_VIRIDIAN that covers viridian code within HVM.
Calls to viridian functions are guarded by is_viridian_domain() and related macros.
Having this option may be beneficial by reducing the code footprint for systems
that are not using Hyper-V.
Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
---
xen/arch/x86/Kconfig | 5 +++++
xen/arch/x86/hvm/Makefile | 2 +-
xen/arch/x86/hvm/hvm.c | 27 ++++++++++++++++++---------
xen/arch/x86/hvm/vlapic.c | 11 +++++++----
xen/arch/x86/include/asm/hvm/domain.h | 4 ++--
xen/arch/x86/include/asm/hvm/hvm.h | 3 ++-
xen/arch/x86/include/asm/hvm/vcpu.h | 3 ++-
7 files changed, 37 insertions(+), 18 deletions(-)
diff --git a/xen/arch/x86/Kconfig b/xen/arch/x86/Kconfig
index 6e41bc0fb4..34f9b79d98 100644
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -348,6 +348,11 @@ config HYPERV_GUEST
endif
+config HVM_VIRIDIAN
+ bool "Viridian enlightenments support" if EXPERT
+ depends on HVM
+ default y
+
I don't see why this should be gated by EXPERT, provided a suitable (currently absent) help message were added explaining what it does in plain, simple words.
For the title, I'd say it needs to state clearly that it refers to enlightenments for guests, rather than enlightenments for Xen itself when running under Hyper-V. As it is, it sounds ambiguous (maybe "Hyper-V enlightenments for guests"?).
On a personal, nitpicky note, I'd say HVM_VIRIDIAN sounds rather redundant; I think just VIRIDIAN works as well while being shorter.
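Putting the three points together, something along these lines is roughly what I have in mind (the name, title and help text below are just a strawman wording, not a concrete demand):

config VIRIDIAN
        bool "Hyper-V enlightenments for guests"
        depends on HVM
        default y
        help
          Support for the Viridian (Hyper-V) paravirtualisation interfaces
          exposed to HVM guests, used primarily by Windows guests.

          If unsure, say Y.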
config MEM_PAGING
bool "Xen memory paging support (UNSUPPORTED)" if UNSUPPORTED
depends on HVM
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index 4c1fa5c6c2..6cc2e74fc4 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_AMD_SVM) += svm/
obj-$(CONFIG_INTEL_VMX) += vmx/
-obj-y += viridian/
+obj-$(CONFIG_HVM_VIRIDIAN) += viridian/
obj-y += asid.o
obj-y += dm.o
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2f31180b6f..4f51d0f66c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -695,9 +695,12 @@ int hvm_domain_initialise(struct domain *d,
if ( hvm_tsc_scaling_supported )
d->arch.hvm.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
- rc = viridian_domain_init(d);
- if ( rc )
- goto fail2;
+ if ( is_viridian_domain(d) )
+ {
+ rc = viridian_domain_init(d);
+ if ( rc )
+ goto fail2;
+ }
rc = alternative_call(hvm_funcs.domain_initialise, d);
if ( rc != 0 )
@@ -733,7 +736,8 @@ void hvm_domain_relinquish_resources(struct domain *d)
if ( hvm_funcs.nhvm_domain_relinquish_resources )
alternative_vcall(hvm_funcs.nhvm_domain_relinquish_resources, d);
- viridian_domain_deinit(d);
+ if ( is_viridian_domain(d) )
+ viridian_domain_deinit(d);
ioreq_server_destroy_all(d);
@@ -1637,9 +1641,12 @@ int hvm_vcpu_initialise(struct vcpu *v)
&& (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
goto fail5;
- rc = viridian_vcpu_init(v);
- if ( rc )
- goto fail6;
+ if ( is_viridian_domain(v->domain) )
+ {
+ rc = viridian_vcpu_init(v);
+ if ( rc )
+ goto fail6;
+ }
rc = ioreq_server_add_vcpu_all(d, v);
if ( rc != 0 )
@@ -1669,13 +1676,15 @@ int hvm_vcpu_initialise(struct vcpu *v)
fail2:
hvm_vcpu_cacheattr_destroy(v);
fail1:
- viridian_vcpu_deinit(v);
+ if ( is_viridian_domain(v->domain) )
+ viridian_vcpu_deinit(v);
return rc;
}
void hvm_vcpu_destroy(struct vcpu *v)
{
- viridian_vcpu_deinit(v);
+ if ( is_viridian_domain(v->domain) )
+ viridian_vcpu_deinit(v);
ioreq_server_remove_vcpu_all(v->domain, v);
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 065b2aab5b..b8236dade0 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -426,7 +426,8 @@ void vlapic_EOI_set(struct vlapic *vlapic)
* priority vector and then recurse to handle the lower priority
* vector.
*/
- bool missed_eoi = viridian_apic_assist_completed(v);
+ bool missed_eoi = has_viridian_apic_assist(v->domain) ?
+ viridian_apic_assist_completed(v) : false;
int vector;
again:
@@ -442,7 +443,7 @@ void vlapic_EOI_set(struct vlapic *vlapic)
* NOTE: It is harmless to call viridian_apic_assist_clear() on a
* recursion, even though it is not necessary.
*/
- if ( !missed_eoi )
+ if ( has_viridian_apic_assist(v->domain) && !missed_eoi )
viridian_apic_assist_clear(v);
vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);
@@ -1360,7 +1361,8 @@ int vlapic_has_pending_irq(struct vcpu *v)
* If so, we need to emulate the EOI here before comparing ISR
* with IRR.
*/
- if ( viridian_apic_assist_completed(v) )
+ if ( has_viridian_apic_assist(v->domain) &&
+ viridian_apic_assist_completed(v) )
vlapic_EOI_set(vlapic);
isr = vlapic_find_highest_isr(vlapic);
@@ -1373,7 +1375,8 @@ int vlapic_has_pending_irq(struct vcpu *v)
if ( isr >= 0 &&
(irr & 0xf0) <= (isr & 0xf0) )
{
- viridian_apic_assist_clear(v);
+ if ( has_viridian_apic_assist(v->domain) )
+ viridian_apic_assist_clear(v);
return -1;
}
diff --git a/xen/arch/x86/include/asm/hvm/domain.h b/xen/arch/x86/include/asm/hvm/domain.h
index 333501d5f2..bc52504cdd 100644
--- a/xen/arch/x86/include/asm/hvm/domain.h
+++ b/xen/arch/x86/include/asm/hvm/domain.h
@@ -110,9 +110,9 @@ struct hvm_domain {
/* hypervisor intercepted msix table */
struct list_head msixtbl_list;
-
+#ifdef CONFIG_HVM_VIRIDIAN
struct viridian_domain *viridian;
-
+#endif
/*
* TSC value that VCPUs use to calculate their tsc_offset value.
* Used during initialization and save/restore.
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 963e820113..1bbeece117 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -507,7 +507,8 @@ hvm_get_cpl(struct vcpu *v)
(has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0)
#define is_viridian_domain(d) \
- (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
+ (is_hvm_domain(d) && IS_ENABLED(CONFIG_HVM_VIRIDIAN) && \
+ (viridian_feature_mask(d) & HVMPV_base_freq))
#define is_viridian_vcpu(v) \
is_viridian_domain((v)->domain)
diff --git a/xen/arch/x86/include/asm/hvm/vcpu.h b/xen/arch/x86/include/asm/hvm/vcpu.h
index 196fed6d5d..bac35ec47a 100644
--- a/xen/arch/x86/include/asm/hvm/vcpu.h
+++ b/xen/arch/x86/include/asm/hvm/vcpu.h
@@ -171,8 +171,9 @@ struct hvm_vcpu {
/* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
struct x86_event inject_event;
-
+#ifdef CONFIG_HVM_VIRIDIAN
struct viridian_vcpu *viridian;
+#endif
};
#endif /* __ASM_X86_HVM_VCPU_H__ */
nit: I suspect the code would be far less cluttered with "if viridian..." checks if the init/deinit/etc. functions had dummy versions in the header for the !HVM_VIRIDIAN case.
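E.g. something like this in the viridian header (a rough sketch only; the function names are the ones this patch touches, and where exactly it lives and what the stubs return is of course up for grabs):

#ifdef CONFIG_HVM_VIRIDIAN
int viridian_domain_init(struct domain *d);
void viridian_domain_deinit(struct domain *d);
int viridian_vcpu_init(struct vcpu *v);
void viridian_vcpu_deinit(struct vcpu *v);
#else
/* No-op stubs so the call sites in hvm.c can stay unguarded when !HVM_VIRIDIAN. */
static inline int viridian_domain_init(struct domain *d) { return 0; }
static inline void viridian_domain_deinit(struct domain *d) {}
static inline int viridian_vcpu_init(struct vcpu *v) { return 0; }
static inline void viridian_vcpu_deinit(struct vcpu *v) {}
#endif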
Cheers, Alejandro