|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen staging] x86/msr: Fix fallout from mostly c/s 832c180
commit be3d5b30331d87e177744dbe23138b9ebcdc86f1
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Apr 9 16:18:46 2019 +0100
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Mon Apr 15 17:51:30 2019 +0100
x86/msr: Fix fallout from mostly c/s 832c180
* Fix the shim build by providing a !CONFIG_HVM declaration for
hvm_get_guest_bndcfgs(), and removing the introduced
ASSERT(is_hvm_domain(d))'s. They are needed for DCE to keep the build
working. Furthermore, in this way, the risk of runtime type confusion is
removed.
* Revert the de-const'ing of the vcpu pointer in vmx_get_guest_bndcfgs().
vmx_vmcs_enter() really does mutate the vcpu, and may cause it to
undergo a
full de/reschedule, which is contrary to the programmer's expectation of
hvm_get_guest_bndcfgs(). guest_rdmsr() was always going to need to lose
its const parameter, and this was the correct time for it to happen.
* The MSRs in vcpu_msrs are in numeric order. Re-position XSS to match.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/arch/x86/hvm/vmx/vmx.c | 5 +----
xen/arch/x86/msr.c | 18 +++++-------------
xen/arch/x86/pv/emul-priv-op.c | 2 +-
xen/include/asm-x86/hvm/hvm.h | 5 +++--
xen/include/asm-x86/msr.h | 12 ++++++------
5 files changed, 16 insertions(+), 26 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c46e05b91e..283eb7b34d 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1150,11 +1150,8 @@ static bool vmx_set_guest_bndcfgs(struct vcpu *v, u64
val)
return true;
}
-static bool vmx_get_guest_bndcfgs(const struct vcpu *cv, u64 *val)
+static bool vmx_get_guest_bndcfgs(struct vcpu *v, u64 *val)
{
- /* Get a non-const pointer for vmx_vmcs_enter() */
- struct vcpu *v = cv->domain->vcpu[cv->vcpu_id];
-
ASSERT(cpu_has_mpx && cpu_has_vmx_mpx);
vmx_vmcs_enter(v);
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 815d599588..0049a73bc7 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -115,7 +115,7 @@ int init_vcpu_msr_policy(struct vcpu *v)
return 0;
}
-int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
{
const struct vcpu *curr = current;
const struct domain *d = v->domain;
@@ -182,13 +182,9 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr,
uint64_t *val)
break;
case MSR_IA32_BNDCFGS:
- if ( !cp->feat.mpx )
+ if ( !cp->feat.mpx || !is_hvm_domain(d) ||
+ !hvm_get_guest_bndcfgs(v, val) )
goto gp_fault;
-
- ASSERT(is_hvm_domain(d));
- if (!hvm_get_guest_bndcfgs(v, val) )
- goto gp_fault;
-
break;
case MSR_IA32_XSS:
@@ -375,13 +371,9 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
break;
case MSR_IA32_BNDCFGS:
- if ( !cp->feat.mpx )
+ if ( !cp->feat.mpx || !is_hvm_domain(d) ||
+ !hvm_set_guest_bndcfgs(v, val) )
goto gp_fault;
-
- ASSERT(is_hvm_domain(d));
- if ( !hvm_set_guest_bndcfgs(v, val) )
- goto gp_fault;
-
break;
case MSR_IA32_XSS:
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index a55a400e5a..af74f50dc8 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -819,7 +819,7 @@ static inline bool is_cpufreq_controller(const struct
domain *d)
static int read_msr(unsigned int reg, uint64_t *val,
struct x86_emulate_ctxt *ctxt)
{
- const struct vcpu *curr = current;
+ struct vcpu *curr = current;
const struct domain *currd = curr->domain;
bool vpmu_msr = false;
int ret;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index c811fa9139..157f0debc6 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -145,7 +145,7 @@ struct hvm_function_table {
int (*get_guest_pat)(struct vcpu *v, u64 *);
int (*set_guest_pat)(struct vcpu *v, u64);
- bool (*get_guest_bndcfgs)(const struct vcpu *v, u64 *);
+ bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *);
bool (*set_guest_bndcfgs)(struct vcpu *v, u64);
void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);
@@ -444,7 +444,7 @@ static inline unsigned long hvm_get_shadow_gs_base(struct
vcpu *v)
return hvm_funcs.get_shadow_gs_base(v);
}
-static inline bool hvm_get_guest_bndcfgs(const struct vcpu *v, u64 *val)
+static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
{
return hvm_funcs.get_guest_bndcfgs &&
hvm_funcs.get_guest_bndcfgs(v, val);
@@ -692,6 +692,7 @@ unsigned long hvm_get_shadow_gs_base(struct vcpu *v);
void hvm_set_info_guest(struct vcpu *v);
void hvm_cpuid_policy_changed(struct vcpu *v);
void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc);
+bool hvm_get_guest_bndcfgs(struct vcpu *v, uint64_t *val);
/* End of prototype list */
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 0d52c085f6..3cbbc65e02 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -296,6 +296,11 @@ struct vcpu_msrs
};
} misc_features_enables;
+ /* 0x00000da0 - MSR_IA32_XSS */
+ struct {
+ uint64_t raw;
+ } xss;
+
/*
* 0xc0000103 - MSR_TSC_AUX
*
@@ -313,11 +318,6 @@ struct vcpu_msrs
* values here may be stale in current context.
*/
uint32_t dr_mask[4];
-
- /* 0x00000da0 - MSR_IA32_XSS */
- struct {
- uint64_t raw;
- } xss;
};
void init_guest_msr_policy(void);
@@ -333,7 +333,7 @@ int init_vcpu_msr_policy(struct vcpu *v);
* These functions are also used by the migration logic, so need to cope with
* being used outside of v's context.
*/
-int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val);
+int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val);
int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val);
#endif /* !__ASSEMBLY__ */
--
generated by git-patchbot for /home/xen/git/xen.git#staging
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
|
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |