[Xen-devel] [PATCH v11 5/9] x86: dynamically attach/detach QoS monitoring service for a guest
Add hypervisor-side support for dynamically attaching and detaching the
QoS monitoring service for a guest. When the QoS monitoring service is
attached to a guest, the system allocates an RMID for it. When the
service is detached or the guest is shut down, the RMID is recycled for
future use.

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
 xen/arch/x86/domain.c        |  3 +++
 xen/arch/x86/domctl.c        | 29 ++++++++++++++++++++++
 xen/arch/x86/pqos.c          | 59 ++++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/domain.h |  2 ++
 xen/include/asm-x86/pqos.h   |  9 +++++++
 xen/include/public/domctl.h  | 12 +++++++++
 6 files changed, 114 insertions(+)
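[Editor's note: for illustration only, not part of this patch. Below is a minimal
toolstack-side sketch of how the new XEN_DOMCTL_pqos_monitor_op might be driven
from libxc. The wrapper names are hypothetical; do_domctl() is assumed to be the
usual libxc-internal domctl helper, and -EUSERS / -ENOENT are the hypervisor-side
return values introduced by this patch, however libxc surfaces them to the caller.]

/* Hypothetical libxc-style wrappers -- a sketch, not part of this patch. */
#include <string.h>
#include <xenctrl.h>
#include "xc_private.h"   /* assumed: provides the internal do_domctl() helper */

static int xc_pqos_monitor_op(xc_interface *xch, uint32_t domid,
                              uint32_t cmd, uint32_t *data)
{
    int rc;
    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd = XEN_DOMCTL_pqos_monitor_op;
    domctl.domain = (domid_t)domid;
    domctl.u.pqos_monitor_op.cmd = cmd;

    rc = do_domctl(xch, &domctl);

    /* QUERY_RMID copies the domain's current RMID back in .data. */
    if ( !rc && data )
        *data = domctl.u.pqos_monitor_op.data;

    return rc;
}

/* Attach: the hypervisor allocates an RMID (it returns -EUSERS if none is free). */
int xc_pqos_monitor_attach(xc_interface *xch, uint32_t domid)
{
    return xc_pqos_monitor_op(xch, domid,
                              XEN_DOMCTL_PQOS_MONITOR_OP_ATTACH, NULL);
}

/* Detach: the hypervisor recycles the RMID (it returns -ENOENT if none is assigned). */
int xc_pqos_monitor_detach(xc_interface *xch, uint32_t domid)
{
    return xc_pqos_monitor_op(xch, domid,
                              XEN_DOMCTL_PQOS_MONITOR_OP_DETACH, NULL);
}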
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index e896210..f8e0e33 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -60,6 +60,7 @@
 #include <xen/numa.h>
 #include <xen/iommu.h>
 #include <compat/vcpu.h>
+#include <asm/pqos.h>
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 DEFINE_PER_CPU(unsigned long, cr4);
@@ -636,6 +637,8 @@ void arch_domain_destroy(struct domain *d)
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
+
+    pqos_monitor_free_rmid(d);
 }
 
 unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index f8b0a79..9047dc6 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -35,6 +35,7 @@
 #include <asm/mem_sharing.h>
 #include <asm/xstate.h>
 #include <asm/debugger.h>
+#include <asm/pqos.h>
 
 static int gdbsx_guest_mem_io(
     domid_t domid, struct xen_domctl_gdbsx_memio *iop)
@@ -1342,6 +1343,34 @@ long arch_do_domctl(
     }
     break;
 
+    case XEN_DOMCTL_pqos_monitor_op:
+        if ( !pqos_monitor_enabled() )
+        {
+            ret = -ENODEV;
+            break;
+        }
+
+        switch ( domctl->u.pqos_monitor_op.cmd )
+        {
+        case XEN_DOMCTL_PQOS_MONITOR_OP_ATTACH:
+            ret = pqos_monitor_alloc_rmid(d);
+            break;
+        case XEN_DOMCTL_PQOS_MONITOR_OP_DETACH:
+            if ( d->arch.pqos_rmid > 0 )
+                pqos_monitor_free_rmid(d);
+            else
+                ret = -ENOENT;
+            break;
+        case XEN_DOMCTL_PQOS_MONITOR_OP_QUERY_RMID:
+            domctl->u.pqos_monitor_op.data = d->arch.pqos_rmid;
+            copyback = 1;
+            break;
+        default:
+            ret = -ENOSYS;
+            break;
+        }
+        break;
+
     default:
         ret = iommu_do_domctl(domctl, d, u_domctl);
         break;
diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c
index 1c431c7..c08cb99 100644
--- a/xen/arch/x86/pqos.c
+++ b/xen/arch/x86/pqos.c
@@ -111,6 +111,65 @@ void __init init_platform_qos(void)
         init_pqos_monitor(opt_rmid_max);
 }
 
+int pqos_monitor_alloc_rmid(struct domain *d)
+{
+    int rc = 0;
+    unsigned int rmid;
+
+    ASSERT(pqos_monitor_enabled());
+
+    spin_lock(&pqosm->pqosm_lock);
+
+    if ( d->arch.pqos_rmid > 0 )
+    {
+        rc = -EEXIST;
+        goto out;
+    }
+
+    for ( rmid = pqosm->rmid_min; rmid <= pqosm->rmid_max; rmid++ )
+    {
+        if ( pqosm->rmid_to_dom[rmid] != DOMID_INVALID )
+            continue;
+
+        pqosm->rmid_to_dom[rmid] = d->domain_id;
+        break;
+    }
+
+    /* No RMID available, assign RMID=0 by default */
+    if ( rmid > pqosm->rmid_max )
+    {
+        rmid = 0;
+        rc = -EUSERS;
+    }
+    else
+        pqosm->rmid_inuse++;
+
+    d->arch.pqos_rmid = rmid;
+
+out:
+    spin_unlock(&pqosm->pqosm_lock);
+
+    return rc;
+}
+
+void pqos_monitor_free_rmid(struct domain *d)
+{
+    unsigned int rmid;
+
+    spin_lock(&pqosm->pqosm_lock);
+    rmid = d->arch.pqos_rmid;
+    /* We do not free the system reserved "RMID=0" */
+    if ( rmid == 0 )
+        goto out;
+
+    pqosm->rmid_to_dom[rmid] = DOMID_INVALID;
+    d->arch.pqos_rmid = 0;
+    pqosm->rmid_inuse--;
+
+out:
+    spin_unlock(&pqosm->pqosm_lock);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index abf55fb..cd53108 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -313,6 +313,8 @@ struct arch_domain
     /* Shared page for notifying that explicit PIRQ EOI is required. */
     unsigned long *pirq_eoi_map;
     unsigned long pirq_eoi_map_mfn;
+
+    unsigned int pqos_rmid; /* QoS monitoring RMID assigned to the domain */
 } __cacheline_aligned;
 
 #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
index 410fa3d..3a3f063 100644
--- a/xen/include/asm-x86/pqos.h
+++ b/xen/include/asm-x86/pqos.h
@@ -15,6 +15,7 @@
  */
 #ifndef ASM_PQOS_H
 #define ASM_PQOS_H
+#include <xen/sched.h>
 #include <public/xen.h>
 #include <xen/spinlock.h>
 
@@ -43,6 +44,14 @@ struct pqos_monitor {
 };
 extern struct pqos_monitor *pqosm;
 
+static inline bool_t pqos_monitor_enabled(void)
+{
+    return !!pqosm;
+}
+
 void __init init_platform_qos(void);
 
+int pqos_monitor_alloc_rmid(struct domain *d);
+void pqos_monitor_free_rmid(struct domain *d);
+
 #endif
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 565fa4c..32c0cb2 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -895,6 +895,16 @@ struct xen_domctl_cacheflush {
 typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t);
 
+struct xen_domctl_pqos_monitor_op {
+#define XEN_DOMCTL_PQOS_MONITOR_OP_DETACH         0
+#define XEN_DOMCTL_PQOS_MONITOR_OP_ATTACH         1
+#define XEN_DOMCTL_PQOS_MONITOR_OP_QUERY_RMID     2
+    uint32_t cmd;
+    uint32_t data;
+};
+typedef struct xen_domctl_pqos_monitor_op xen_domctl_pqos_monitor_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_pqos_monitor_op_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -965,6 +975,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_getnodeaffinity               69
 #define XEN_DOMCTL_set_max_evtchn                70
 #define XEN_DOMCTL_cacheflush                    71
+#define XEN_DOMCTL_pqos_monitor_op               72
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1024,6 +1035,7 @@ struct xen_domctl {
         struct xen_domctl_cacheflush        cacheflush;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
+        struct xen_domctl_pqos_monitor_op   pqos_monitor_op;
         uint8_t                             pad[128];
     } u;
 };
-- 
1.8.1.5

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel