[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v3 25/28] xen/domctl: avoid unreachable code when both MGMT_HYPERCALLS and MEM_SHARING unset
The following functions have been referenced in places which is either guarded with CONFIG_MGMT_HYPERCALLS or CONFIG_MEM_SHARING: - arch_hvm_save - arch_hvm_check - arch_hvm_load - hvm_save_size - hvm_save - hvm_load - hvm_vmtrace_reset So they shall be wrapped under OR relationship, otherwise they will become unreachable codes when MGMT_HYPERCALLS=n && MEM_SHARING=n, and hence violating Misra rule 2.1. We move arch_hvm_save(), arch_hvm_check(), arch_hvm_load() and hvm_save_size() nearer to the left functions, to avoid scattered #ifdef-wrapping. Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx> --- v2 -> v3 - new commit --- xen/arch/x86/hvm/save.c | 154 +++++++++++++++-------------- xen/arch/x86/hvm/vmx/vmx.c | 4 + xen/arch/x86/include/asm/hvm/hvm.h | 4 + 3 files changed, 86 insertions(+), 76 deletions(-) diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c index 8ab6405706..47050e13b6 100644 --- a/xen/arch/x86/hvm/save.c +++ b/xen/arch/x86/hvm/save.c @@ -15,62 +15,6 @@ #include <public/hvm/save.h> -static void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr) -{ - uint32_t eax, ebx, ecx, edx; - - /* Save some CPUID bits */ - cpuid(1, &eax, &ebx, &ecx, &edx); - hdr->cpuid = eax; - - /* Save guest's preferred TSC. 
*/ - hdr->gtsc_khz = d->arch.tsc_khz; - - /* Time when saving started */ - d->arch.hvm.sync_tsc = rdtsc(); -} - -static int arch_hvm_check(const struct domain *d, - const struct hvm_save_header *hdr) -{ - uint32_t eax, ebx, ecx, edx; - - if ( hdr->magic != HVM_FILE_MAGIC ) - { - printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n", - d->domain_id, hdr->magic); - return -EINVAL; - } - - if ( hdr->version != HVM_FILE_VERSION ) - { - printk(XENLOG_G_ERR "HVM%d restore: unsupported version %u\n", - d->domain_id, hdr->version); - return -EINVAL; - } - - cpuid(1, &eax, &ebx, &ecx, &edx); - /* CPUs ought to match but with feature-masking they might not */ - if ( (hdr->cpuid & ~0x0fUL) != (eax & ~0x0fUL) ) - printk(XENLOG_G_INFO "HVM%d restore: VM saved on one CPU " - "(%#"PRIx32") and restored on another (%#"PRIx32").\n", - d->domain_id, hdr->cpuid, eax); - - return 0; -} - -static void arch_hvm_load(struct domain *d, const struct hvm_save_header *hdr) -{ - /* Restore guest's preferred TSC frequency. 
*/ - if ( hdr->gtsc_khz ) - d->arch.tsc_khz = hdr->gtsc_khz; - if ( d->arch.vtsc ) - hvm_set_rdtsc_exiting(d, 1); - - /* Time when restore started */ - d->arch.hvm.sync_tsc = rdtsc(); -} - /* List of handlers for various HVM save and restore types */ static struct { hvm_save_handler save; @@ -101,26 +45,6 @@ void __init hvm_register_savevm(uint16_t typecode, hvm_sr_handlers[typecode].kind = kind; } -size_t hvm_save_size(struct domain *d) -{ - struct vcpu *v; - size_t sz; - int i; - - /* Basic overhead for header and footer */ - sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER); - - /* Plus space for each thing we will be saving */ - for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) - if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU ) - for_each_vcpu(d, v) - sz += hvm_sr_handlers[i].size; - else - sz += hvm_sr_handlers[i].size; - - return sz; -} - /* * Extract a single instance of a save record, by marshalling all records of * that type and copying out the one we need. @@ -196,6 +120,83 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance, return rv; } +#if defined(CONFIG_MGMT_HYPERCALLS) || defined(CONFIG_MEM_SHARING) +static void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr) +{ + uint32_t eax, ebx, ecx, edx; + + /* Save some CPUID bits */ + cpuid(1, &eax, &ebx, &ecx, &edx); + hdr->cpuid = eax; + + /* Save guest's preferred TSC. 
*/ + hdr->gtsc_khz = d->arch.tsc_khz; + + /* Time when saving started */ + d->arch.hvm.sync_tsc = rdtsc(); +} + +static int arch_hvm_check(const struct domain *d, + const struct hvm_save_header *hdr) +{ + uint32_t eax, ebx, ecx, edx; + + if ( hdr->magic != HVM_FILE_MAGIC ) + { + printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n", + d->domain_id, hdr->magic); + return -EINVAL; + } + + if ( hdr->version != HVM_FILE_VERSION ) + { + printk(XENLOG_G_ERR "HVM%d restore: unsupported version %u\n", + d->domain_id, hdr->version); + return -EINVAL; + } + + cpuid(1, &eax, &ebx, &ecx, &edx); + /* CPUs ought to match but with feature-masking they might not */ + if ( (hdr->cpuid & ~0x0fUL) != (eax & ~0x0fUL) ) + printk(XENLOG_G_INFO "HVM%d restore: VM saved on one CPU " + "(%#"PRIx32") and restored on another (%#"PRIx32").\n", + d->domain_id, hdr->cpuid, eax); + + return 0; +} + +static void arch_hvm_load(struct domain *d, const struct hvm_save_header *hdr) +{ + /* Restore guest's preferred TSC frequency. 
*/ + if ( hdr->gtsc_khz ) + d->arch.tsc_khz = hdr->gtsc_khz; + if ( d->arch.vtsc ) + hvm_set_rdtsc_exiting(d, 1); + + /* Time when restore started */ + d->arch.hvm.sync_tsc = rdtsc(); +} + +size_t hvm_save_size(struct domain *d) +{ + struct vcpu *v; + size_t sz; + unsigned int i; + + /* Basic overhead for header and footer */ + sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER); + + /* Plus space for each thing we will be saving */ + for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) + if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU ) + for_each_vcpu(d, v) + sz += hvm_sr_handlers[i].size; + else + sz += hvm_sr_handlers[i].size; + + return sz; +} + int hvm_save(struct domain *d, hvm_domain_context_t *h) { char *c; @@ -390,6 +391,7 @@ int hvm_load(struct domain *d, bool real, hvm_domain_context_t *h) /* Not reached */ } +#endif /* CONFIG_MGMT_HYPERCALLS || CONFIG_MEM_SHARING */ int _hvm_init_entry(struct hvm_domain_context *h, uint16_t tc, uint16_t inst, uint32_t len) diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 1996e139a0..4394990131 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -2701,6 +2701,7 @@ static int cf_check vmtrace_output_position(struct vcpu *v, uint64_t *pos) return v->arch.hvm.vmx.ipt_active; } +#if defined(CONFIG_MGMT_HYPERCALLS) || defined(CONFIG_MEM_SHARING) static int cf_check vmtrace_reset(struct vcpu *v) { if ( !v->arch.hvm.vmx.ipt_active ) @@ -2710,6 +2711,7 @@ static int cf_check vmtrace_reset(struct vcpu *v) v->arch.msrs->rtit.status = 0; return 0; } +#endif /* CONFIG_MGMT_HYPERCALLS || CONFIG_MEM_SHARING */ static uint64_t cf_check vmx_get_reg(struct vcpu *v, unsigned int reg) { @@ -2888,7 +2890,9 @@ static struct hvm_function_table __initdata_cf_clobber vmx_function_table = { .vmtrace_output_position = vmtrace_output_position, .vmtrace_set_option = vmtrace_set_option, .vmtrace_get_option = vmtrace_get_option, +#if defined(CONFIG_MGMT_HYPERCALLS) || 
defined(CONFIG_MEM_SHARING) .vmtrace_reset = vmtrace_reset, +#endif .get_reg = vmx_get_reg, .set_reg = vmx_set_reg, diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h index 473cf24b83..9d6cb42d48 100644 --- a/xen/arch/x86/include/asm/hvm/hvm.h +++ b/xen/arch/x86/include/asm/hvm/hvm.h @@ -240,7 +240,9 @@ struct hvm_function_table { int (*vmtrace_output_position)(struct vcpu *v, uint64_t *pos); int (*vmtrace_set_option)(struct vcpu *v, uint64_t key, uint64_t value); int (*vmtrace_get_option)(struct vcpu *v, uint64_t key, uint64_t *value); +#if defined(CONFIG_MGMT_HYPERCALLS) || defined(CONFIG_MEM_SHARING) int (*vmtrace_reset)(struct vcpu *v); +#endif uint64_t (*get_reg)(struct vcpu *v, unsigned int reg); void (*set_reg)(struct vcpu *v, unsigned int reg, uint64_t val); @@ -775,6 +777,7 @@ static inline int hvm_vmtrace_get_option( return -EOPNOTSUPP; } +#if defined(CONFIG_MGMT_HYPERCALLS) || defined(CONFIG_MEM_SHARING) static inline int hvm_vmtrace_reset(struct vcpu *v) { if ( hvm_funcs.vmtrace_reset ) @@ -782,6 +785,7 @@ static inline int hvm_vmtrace_reset(struct vcpu *v) return -EOPNOTSUPP; } +#endif /* CONFIG_MGMT_HYPERCALLS || CONFIG_MEM_SHARING */ /* * Accessors for registers which have per-guest-type or per-vendor locations -- 2.34.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |