[Xen-devel] [PATCH v8 09/21] xen/x86: set the vPMU interface based on the presence of a lapic
Instead of choosing the vPMU interface exposed to guests based on the
guest type, select it based on whether the guest has an emulated local APIC.
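For illustration, a rough standalone sketch of the new dispatch rule follows.
The struct and flag below are simplified stand-ins for Xen's
d->arch.emulation_flags and the has_vlapic() predicate this patch switches
to; they are not the real Xen types.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for a Xen domain; emu_lapic models the
     * XEN_X86_EMU_LAPIC bit in d->arch.emulation_flags. */
    struct domain {
        bool emu_lapic;
    };

    /* Models Xen's has_vlapic(d) predicate. */
    static bool has_vlapic(const struct domain *d)
    {
        return d->emu_lapic;
    }

    int main(void)
    {
        struct domain hvm = { .emu_lapic = true };   /* classic HVM guest */
        struct domain pvh = { .emu_lapic = false };  /* PVH: no emulated lapic */

        /* New rule: the MSR/LVTPC-based vPMU follows the emulated lapic;
         * guests without one (PV and PVH) use the XENPMU_* hypercalls. */
        printf("hvm guest -> %s\n",
               has_vlapic(&hvm) ? "lapic vPMU" : "xenpmu_op vPMU");
        printf("pvh guest -> %s\n",
               has_vlapic(&pvh) ? "lapic vPMU" : "xenpmu_op vPMU");
        return 0;
    }

This is why svm_vcpu_initialise() and vmx_vcpu_initialise() below now gate
vpmu_initialise() on has_vlapic() as well, and why do_xenpmu_op() rejects
callers that do have a vlapic.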
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
Changes since v7:
- Merge vpmu work from Boris.
Changes since v6:
- Major rework of the approach.
- Drop Andrew Cooper's Acked-by.
Changes since v4:
- Add Andrew Cooper's Acked-by.
---
xen/arch/x86/cpu/vpmu.c | 11 ++++++-----
xen/arch/x86/cpu/vpmu_amd.c | 6 +++---
xen/arch/x86/cpu/vpmu_intel.c | 6 +++---
xen/arch/x86/hvm/hvm.c | 2 ++
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/arch/x86/hvm/vmx/vmx.c | 2 +-
6 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 2f5156a..4346c66 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -94,7 +94,7 @@ void vpmu_lvtpc_update(uint32_t val)
vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | (val & APIC_LVT_MASKED);
/* Postpone APIC updates for PV(H) guests if PMU interrupt is pending */
- if ( is_hvm_vcpu(curr) || !vpmu->xenpmu_data ||
+ if ( has_vlapic(curr->domain) || !vpmu->xenpmu_data ||
!vpmu_is_set(vpmu, VPMU_CACHED) )
apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
}
@@ -129,7 +129,7 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
* and since do_wr/rdmsr may load VPMU context we should save
* (and unload) it again.
*/
- if ( !is_hvm_vcpu(curr) && vpmu->xenpmu_data &&
+ if ( !has_vlapic(curr->domain) && vpmu->xenpmu_data &&
vpmu_is_set(vpmu, VPMU_CACHED) )
{
vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
@@ -184,7 +184,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
return;
/* PV(H) guest */
- if ( !is_hvm_vcpu(sampling) || (vpmu_mode & XENPMU_MODE_ALL) )
+ if ( !has_vlapic(sampling->domain) || (vpmu_mode & XENPMU_MODE_ALL) )
{
const struct cpu_user_regs *cur_regs;
uint64_t *flags = &vpmu->xenpmu_data->pmu.pmu_flags;
@@ -411,7 +411,8 @@ int vpmu_load(struct vcpu *v, bool_t from_guest)
/* Only when PMU is counting, we load PMU context immediately. */
if ( !vpmu_is_set(vpmu, VPMU_RUNNING) ||
- (!is_hvm_vcpu(vpmu_vcpu(vpmu)) && vpmu_is_set(vpmu, VPMU_CACHED)) )
+ (!has_vlapic(vpmu_vcpu(vpmu)->domain) &&
+ vpmu_is_set(vpmu, VPMU_CACHED)) )
return 0;
if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
@@ -635,7 +636,7 @@ long do_xenpmu_op(unsigned int op,
XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg)
struct xen_pmu_data *xenpmu_data;
struct vpmu_struct *vpmu;
- if ( !opt_vpmu_enabled )
+ if ( !opt_vpmu_enabled || has_vlapic(current->domain) )
return -EOPNOTSUPP;
ret = xsm_pmu_op(XSM_OTHER, current->domain, op);
diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 04da81a..990e6f3 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -238,7 +238,7 @@ static int amd_vpmu_load(struct vcpu *v, bool_t from_guest)
bool_t is_running = 0;
struct xen_pmu_amd_ctxt *guest_ctxt = &vpmu->xenpmu_data->pmu.c.amd;
- ASSERT(!is_hvm_vcpu(v));
+ ASSERT(!has_vlapic(v->domain));
ctxt = vpmu->context;
ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
@@ -314,7 +314,7 @@ static int amd_vpmu_save(struct vcpu *v, bool_t to_guest)
{
struct xen_pmu_amd_ctxt *guest_ctxt, *ctxt;
- ASSERT(!is_hvm_vcpu(v));
+ ASSERT(!has_vlapic(v->domain));
ctxt = vpmu->context;
guest_ctxt = &vpmu->xenpmu_data->pmu.c.amd;
memcpy(&guest_ctxt->regs[0], &ctxt->regs[0], regs_sz);
@@ -525,7 +525,7 @@ int svm_vpmu_initialise(struct vcpu *v)
vpmu->context = ctxt;
vpmu->priv_context = NULL;
- if ( !is_hvm_vcpu(v) )
+ if ( !has_vlapic(v->domain) )
{
/* Copy register offsets to shared area */
ASSERT(vpmu->xenpmu_data);
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 12f80ae..a9e0e68 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -337,7 +337,7 @@ static int core2_vpmu_save(struct vcpu *v, bool_t to_guest)
if ( to_guest )
{
- ASSERT(!is_hvm_vcpu(v));
+ ASSERT(!has_vlapic(v->domain));
memcpy((void *)(&vpmu->xenpmu_data->pmu.c.intel) + regs_off,
vpmu->context + regs_off, regs_sz);
}
@@ -442,7 +442,7 @@ static int core2_vpmu_load(struct vcpu *v, bool_t from_guest)
{
int ret;
- ASSERT(!is_hvm_vcpu(v));
+ ASSERT(!has_vlapic(v->domain));
memcpy(vpmu->context + regs_off,
(void *)&v->arch.vpmu.xenpmu_data->pmu.c.intel + regs_off,
@@ -502,7 +502,7 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
vpmu->context = core2_vpmu_cxt;
vpmu->priv_context = p;
- if ( !is_hvm_vcpu(v) )
+ if ( !has_vlapic(v->domain) )
{
/* Copy fixed/arch register offsets to shared area */
ASSERT(vpmu->xenpmu_data);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4490e9d..409b61d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5143,6 +5143,7 @@ static hvm_hypercall_t *const hvm_hypercall64_table[NR_hypercalls] = {
HYPERCALL(sysctl),
HYPERCALL(domctl),
HYPERCALL(tmem_op),
+ HYPERCALL(xenpmu_op),
[ __HYPERVISOR_arch_1 ] = (hvm_hypercall_t *)paging_domctl_continuation
};
@@ -5164,6 +5165,7 @@ static hvm_hypercall_t *const hvm_hypercall32_table[NR_hypercalls] = {
HYPERCALL(sysctl),
HYPERCALL(domctl),
HYPERCALL(tmem_op),
+ HYPERCALL(xenpmu_op),
[ __HYPERVISOR_arch_1 ] = (hvm_hypercall_t *)paging_domctl_continuation
};
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 404634b..4adf86b 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1166,7 +1166,7 @@ static int svm_vcpu_initialise(struct vcpu *v)
}
/* PVH's VPMU is initialized via hypercall */
- if ( is_hvm_vcpu(v) )
+ if ( is_hvm_vcpu(v) && has_vlapic(v->domain) )
vpmu_initialise(v);
svm_guest_osvw_init(v);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 073e5be..5df866e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -148,7 +148,7 @@ static int vmx_vcpu_initialise(struct vcpu *v)
}
/* PVH's VPMU is initialized via hypercall */
- if ( is_hvm_vcpu(v) )
+ if ( is_hvm_vcpu(v) && has_vlapic(v->domain) )
vpmu_initialise(v);
vmx_install_vlapic_mapping(v);
--
1.9.5 (Apple Git-50.3)