[Xen-devel] [PATCH v8 2/6] x86: dynamically attach/detach CQM service for a guest
Add hypervisor-side support for dynamically attaching and detaching the CQM
service for a given guest. When the CQM service is attached to a guest, the
system allocates an RMID for it. When the service is detached or the guest is
shut down, the RMID is reclaimed for future use.

Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
 xen/arch/x86/domain.c        |  3 +++
 xen/arch/x86/domctl.c        | 28 ++++++++++++++++++++
 xen/arch/x86/pqos.c          | 60 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/domain.h |  2 ++
 xen/include/asm-x86/pqos.h   | 12 +++++++++
 xen/include/public/domctl.h  | 11 ++++++++
 6 files changed, 116 insertions(+)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 16f2b50..2656204 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -60,6 +60,7 @@
 #include <xen/numa.h>
 #include <xen/iommu.h>
 #include <compat/vcpu.h>
+#include <asm/pqos.h>
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 DEFINE_PER_CPU(unsigned long, cr4);
@@ -612,6 +613,8 @@ void arch_domain_destroy(struct domain *d)
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
+
+    free_cqm_rmid(d);
 }
 
 unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index ef6c140..7219011 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -35,6 +35,7 @@
 #include <asm/mem_sharing.h>
 #include <asm/xstate.h>
 #include <asm/debugger.h>
+#include <asm/pqos.h>
 
 static int gdbsx_guest_mem_io(
     domid_t domid, struct xen_domctl_gdbsx_memio *iop)
@@ -1245,6 +1246,33 @@ long arch_do_domctl(
     }
     break;
 
+    case XEN_DOMCTL_attach_pqos:
+    {
+        if ( !(domctl->u.qos_type.flags & XEN_DOMCTL_pqos_cqm) )
+            ret = -EINVAL;
+        else if ( !system_supports_cqm() )
+            ret = -ENODEV;
+        else
+            ret = alloc_cqm_rmid(d);
+    }
+    break;
+
+    case XEN_DOMCTL_detach_pqos:
+    {
+        if ( !(domctl->u.qos_type.flags & XEN_DOMCTL_pqos_cqm) )
+            ret = -EINVAL;
+        else if ( !system_supports_cqm() )
+            ret = -ENODEV;
+        else if ( d->arch.pqos_cqm_rmid > 0 )
+        {
+            free_cqm_rmid(d);
+            ret = 0;
+        }
+        else
+            ret = -ENOENT;
+    }
+    break;
+
     default:
         ret = iommu_do_domctl(domctl, d, u_domctl);
         break;
diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c
index ba0de37..eb469ac 100644
--- a/xen/arch/x86/pqos.c
+++ b/xen/arch/x86/pqos.c
@@ -17,6 +17,7 @@
 #include <asm/processor.h>
 #include <xen/init.h>
 #include <xen/mm.h>
+#include <xen/spinlock.h>
 #include <asm/pqos.h>
 
 static bool_t __initdata opt_pqos = 1;
@@ -145,6 +146,65 @@ void __init init_platform_qos(void)
         init_qos_monitor();
 }
 
+int alloc_cqm_rmid(struct domain *d)
+{
+    int rc = 0;
+    unsigned int rmid;
+
+    ASSERT(system_supports_cqm());
+
+    spin_lock(&cqm->cqm_lock);
+
+    if ( d->arch.pqos_cqm_rmid > 0 )
+    {
+        rc = -EEXIST;
+        goto out;
+    }
+
+    for ( rmid = cqm->min_rmid; rmid <= cqm->max_rmid; rmid++ )
+    {
+        if ( cqm->rmid_to_dom[rmid] != DOMID_INVALID )
+            continue;
+
+        cqm->rmid_to_dom[rmid] = d->domain_id;
+        break;
+    }
+
+    /* No CQM RMID available, assign RMID=0 by default */
+    if ( rmid > cqm->max_rmid )
+    {
+        rmid = 0;
+        rc = -EUSERS;
+    }
+    else
+        cqm->used_rmid++;
+
+    d->arch.pqos_cqm_rmid = rmid;
+
+out:
+    spin_unlock(&cqm->cqm_lock);
+
+    return rc;
+}
+
+void free_cqm_rmid(struct domain *d)
+{
+    unsigned int rmid;
+
+    spin_lock(&cqm->cqm_lock);
+    rmid = d->arch.pqos_cqm_rmid;
+    /* We do not free system reserved "RMID=0" */
+    if ( rmid == 0 )
+        goto out;
+
+    cqm->rmid_to_dom[rmid] = DOMID_INVALID;
+    d->arch.pqos_cqm_rmid = 0;
+    cqm->used_rmid--;
+
+out:
+    spin_unlock(&cqm->cqm_lock);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index ea72db2..662714d 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -313,6 +313,8 @@ struct arch_domain
     spinlock_t e820_lock;
     struct e820entry *e820;
     unsigned int nr_e820;
+
+    unsigned int pqos_cqm_rmid;       /* CQM RMID assigned to the domain */
 } __cacheline_aligned;
 
 #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
index 0a8065c..f25037d 100644
--- a/xen/include/asm-x86/pqos.h
+++ b/xen/include/asm-x86/pqos.h
@@ -16,6 +16,7 @@
  */
 #ifndef ASM_PQOS_H
 #define ASM_PQOS_H
+#include <xen/sched.h>
 #include <public/xen.h>
 #include <xen/spinlock.h>
 
@@ -38,6 +39,17 @@ struct pqos_cqm {
 };
 extern struct pqos_cqm *cqm;
 
+static inline bool_t system_supports_cqm(void)
+{
+    return !!cqm;
+}
+
+/* IA32_QM_CTR */
+#define IA32_QM_CTR_ERROR_MASK (0x3ul << 62)
+
 void init_platform_qos(void);
+int alloc_cqm_rmid(struct domain *d);
+void free_cqm_rmid(struct domain *d);
+
 #endif
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 91f01fa..f8d9293 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -885,6 +885,14 @@ struct xen_domctl_set_max_evtchn {
 typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
 
+struct xen_domctl_qos_type {
+#define _XEN_DOMCTL_pqos_cqm 0
+#define XEN_DOMCTL_pqos_cqm  (1U<<_XEN_DOMCTL_pqos_cqm)
+    uint64_t flags;
+};
+typedef struct xen_domctl_qos_type xen_domctl_qos_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_qos_type_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -954,6 +962,8 @@ struct xen_domctl {
 #define XEN_DOMCTL_setnodeaffinity               68
 #define XEN_DOMCTL_getnodeaffinity               69
 #define XEN_DOMCTL_set_max_evtchn                70
+#define XEN_DOMCTL_attach_pqos                   71
+#define XEN_DOMCTL_detach_pqos                   72
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1014,6 +1024,7 @@ struct xen_domctl {
         struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus     gdbsx_domstatus;
+        struct xen_domctl_qos_type            qos_type;
         uint8_t                               pad[128];
     } u;
 };
-- 
1.7.9.5
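[Editorial note: for readers following the series from the archive, below is a
self-contained, user-space C model of the RMID bookkeeping this patch
introduces. It is a sketch under stated assumptions, not hypervisor code:
locking is omitted, min_rmid is taken to be 1 (RMID 0 stays reserved as the
shared "unmonitored" default, as in the patch), and the table is shrunk to
four entries, whereas real hardware reports its RMID range via CPUID leaf 0xF.]

/* Standalone model of the patch's RMID allocator: each RMID above 0 maps
 * to at most one domain id, DOMID_INVALID marks a free slot, and a failed
 * allocation falls back to the reserved RMID 0. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_RMID      4        /* kept tiny; hardware reports the real max */
#define DOMID_INVALID 0x7FF4U  /* as in Xen's public/xen.h */

static uint16_t rmid_to_dom[MAX_RMID + 1];
static unsigned int used_rmid;

static int alloc_cqm_rmid(uint16_t domid, unsigned int *out)
{
    unsigned int rmid;

    /* Linear scan for a free slot, skipping the reserved RMID 0. */
    for ( rmid = 1; rmid <= MAX_RMID; rmid++ )
        if ( rmid_to_dom[rmid] == DOMID_INVALID )
        {
            rmid_to_dom[rmid] = domid;
            used_rmid++;
            *out = rmid;
            return 0;
        }

    *out = 0;   /* no RMID available: fall back to the shared default */
    return -1;  /* the patch returns -EUSERS here */
}

static void free_cqm_rmid(unsigned int rmid)
{
    if ( rmid == 0 )   /* never free the system-reserved RMID 0 */
        return;
    rmid_to_dom[rmid] = DOMID_INVALID;
    used_rmid--;
}

int main(void)
{
    unsigned int rmid, i;

    for ( i = 0; i <= MAX_RMID; i++ )
        rmid_to_dom[i] = DOMID_INVALID;

    /* Attach four domains: they get RMIDs 1..4; a fifth attach fails. */
    for ( i = 1; i <= MAX_RMID; i++ )
        assert(alloc_cqm_rmid(i, &rmid) == 0 && rmid == i);
    assert(alloc_cqm_rmid(99, &rmid) < 0 && rmid == 0);

    free_cqm_rmid(2);                                    /* detach one... */
    assert(alloc_cqm_rmid(99, &rmid) == 0 && rmid == 2); /* ...slot reused */

    printf("%u RMIDs in use\n", used_rmid);
    return 0;
}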
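[Editorial note: the toolstack plumbing for the two new domctls arrives later
in the series, so nothing below is part of this patch. As a rough illustration
of how a caller might drive them, assuming libxc's internal do_domctl() helper
and two hypothetical wrapper names:]

/* Hypothetical libxc-side wrappers for XEN_DOMCTL_attach_pqos and
 * XEN_DOMCTL_detach_pqos; the real interface is added by a later patch. */
#include "xc_private.h"

int xc_domain_pqos_attach_cqm(xc_interface *xch, uint32_t domid)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_attach_pqos;
    domctl.domain = (domid_t)domid;
    domctl.u.qos_type.flags = XEN_DOMCTL_pqos_cqm;

    /* -EEXIST: already attached; -EUSERS: all RMIDs in use. */
    return do_domctl(xch, &domctl);
}

int xc_domain_pqos_detach_cqm(xc_interface *xch, uint32_t domid)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_detach_pqos;
    domctl.domain = (domid_t)domid;
    domctl.u.qos_type.flags = XEN_DOMCTL_pqos_cqm;

    /* -ENOENT: no RMID currently attached to this domain. */
    return do_domctl(xch, &domctl);
}

[Both wrappers return 0 on success; the other error values surface straight
from the hypervisor switch above: -EINVAL for an unrecognised flag and
-ENODEV when the platform lacks CQM.]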