[Xen-devel] [PATCH v10 2/6] x86: dynamically attach/detach CQM service for a guest
Add hypervisor-side support for dynamically attaching and detaching the
CQM service for a given guest.
When the CQM service is attached to a guest, the hypervisor allocates an
RMID for it. When the service is detached or the guest is shut down, the
RMID is reclaimed for future use.
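For reference, a toolstack reaches these operations through the ordinary
domctl path. Below is a minimal libxc-style sketch; the wrapper names
xc_domain_pqos_attach/xc_domain_pqos_detach are illustrative and not part
of this series, and it assumes libxc's internal do_domctl() helper plus
the domctl numbers and flag introduced by this patch:

/* Hypothetical libxc wrappers -- illustrative only, not part of this
 * series.  Built on libxc internals (DECLARE_DOMCTL, do_domctl()). */
#include "xc_private.h"

static int xc_domain_pqos_attach(xc_interface *xch, uint32_t domid)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_attach_pqos;
    domctl.domain = domid;
    domctl.u.qos_type.flags = XEN_DOMCTL_pqos_cqm;

    /* 0 on success; on failure errno reflects the hypervisor error
     * (e.g. EUSERS once every hardware RMID is in use). */
    return do_domctl(xch, &domctl);
}

static int xc_domain_pqos_detach(xc_interface *xch, uint32_t domid)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_detach_pqos;
    domctl.domain = domid;
    domctl.u.qos_type.flags = XEN_DOMCTL_pqos_cqm;

    /* Fails with ENOENT if the domain has no RMID attached. */
    return do_domctl(xch, &domctl);
}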
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
---
xen/arch/x86/domain.c | 3 +++
xen/arch/x86/domctl.c | 24 +++++++++++++++++
xen/arch/x86/pqos/cqm.c | 59 ++++++++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/domain.h | 2 ++
xen/include/asm-x86/pqos.h | 9 +++++++
xen/include/public/domctl.h | 11 ++++++++
6 files changed, 108 insertions(+)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 0d563de..3ea9402 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -60,6 +60,7 @@
#include <xen/numa.h>
#include <xen/iommu.h>
#include <compat/vcpu.h>
+#include <asm/pqos.h>
DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
DEFINE_PER_CPU(unsigned long, cr4);
@@ -611,6 +612,8 @@ void arch_domain_destroy(struct domain *d)
free_xenheap_page(d->shared_info);
cleanup_domain_irq_mapping(d);
+
+ free_cqm_rmid(d);
}
unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 26635ff..d5835c3 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -35,6 +35,7 @@
#include <asm/mem_sharing.h>
#include <asm/xstate.h>
#include <asm/debugger.h>
+#include <asm/pqos.h>
static int gdbsx_guest_mem_io(
domid_t domid, struct xen_domctl_gdbsx_memio *iop)
@@ -1256,6 +1257,29 @@ long arch_do_domctl(
}
break;
+ case XEN_DOMCTL_attach_pqos:
+ if ( !(domctl->u.qos_type.flags & XEN_DOMCTL_pqos_cqm) )
+ ret = -EINVAL;
+ else if ( !system_supports_cqm() )
+ ret = -ENODEV;
+ else
+ ret = alloc_cqm_rmid(d);
+ break;
+
+ case XEN_DOMCTL_detach_pqos:
+ if ( !(domctl->u.qos_type.flags & XEN_DOMCTL_pqos_cqm) )
+ ret = -EINVAL;
+ else if ( !system_supports_cqm() )
+ ret = -ENODEV;
+ else if ( d->arch.pqos_cqm_rmid > 0 )
+ {
+ free_cqm_rmid(d);
+ ret = 0;
+ }
+ else
+ ret = -ENOENT;
+ break;
+
default:
ret = iommu_do_domctl(domctl, d, u_domctl);
break;
diff --git a/xen/arch/x86/pqos/cqm.c b/xen/arch/x86/pqos/cqm.c
index b46f25b..0f492a4 100644
--- a/xen/arch/x86/pqos/cqm.c
+++ b/xen/arch/x86/pqos/cqm.c
@@ -168,6 +168,65 @@ out:
cqm = NULL;
}
+int alloc_cqm_rmid(struct domain *d)
+{
+ int rc = 0;
+ unsigned int rmid;
+
+ ASSERT(system_supports_cqm());
+
+ spin_lock(&cqm->cqm_lock);
+
+ if ( d->arch.pqos_cqm_rmid > 0 )
+ {
+ rc = -EEXIST;
+ goto out;
+ }
+
+ for ( rmid = cqm->rmid_min; rmid <= cqm->rmid_max; rmid++ )
+ {
+ if ( cqm->rmid_to_dom[rmid] != DOMID_INVALID )
+ continue;
+
+ cqm->rmid_to_dom[rmid] = d->domain_id;
+ break;
+ }
+
+ /* No CQM RMID available, assign RMID=0 by default */
+ if ( rmid > cqm->rmid_max )
+ {
+ rmid = 0;
+ rc = -EUSERS;
+ }
+ else
+ cqm->rmid_inuse++;
+
+ d->arch.pqos_cqm_rmid = rmid;
+
+out:
+ spin_unlock(&cqm->cqm_lock);
+
+ return rc;
+}
+
+void free_cqm_rmid(struct domain *d)
+{
+ unsigned int rmid;
+
+ spin_lock(&cqm->cqm_lock);
+ rmid = d->arch.pqos_cqm_rmid;
+ /* We do not free the system-reserved "RMID=0" */
+ if ( rmid == 0 )
+ goto out;
+
+ cqm->rmid_to_dom[rmid] = DOMID_INVALID;
+ d->arch.pqos_cqm_rmid = 0;
+ cqm->rmid_inuse--;
+
+out:
+ spin_unlock(&cqm->cqm_lock);
+}
+
/*
* Local variables:
* mode: C
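As a sanity check on the allocation scheme above, here is a small,
self-contained user-space model (the MAX_RMID value and domid numbers
are made up for illustration; the real hypervisor sizes the table from
CPUID and takes cqm_lock around the scan):

/* Standalone model of the linear-scan RMID allocator -- illustrative
 * only.  RMID 0 stays reserved for unmonitored domains, and
 * exhaustion falls back to RMID 0 with -EUSERS, mirroring
 * alloc_cqm_rmid() above. */
#include <assert.h>
#include <errno.h>
#include <stdio.h>

#define MAX_RMID      3        /* hypothetical hardware maximum */
#define DOMID_INVALID 0x7FF4U  /* from xen/include/public/xen.h */

static unsigned int rmid_to_dom[MAX_RMID + 1];

static int alloc_rmid(unsigned int domid, unsigned int *out)
{
    unsigned int rmid;

    /* Scan from 1: RMID 0 is never handed out. */
    for ( rmid = 1; rmid <= MAX_RMID; rmid++ )
        if ( rmid_to_dom[rmid] == DOMID_INVALID )
        {
            rmid_to_dom[rmid] = domid;
            *out = rmid;
            return 0;
        }

    *out = 0;          /* fall back to the shared default RMID */
    return -EUSERS;
}

int main(void)
{
    unsigned int i, rmid;

    for ( i = 0; i <= MAX_RMID; i++ )
        rmid_to_dom[i] = DOMID_INVALID;

    for ( i = 1; i <= MAX_RMID; i++ )
        assert(alloc_rmid(i, &rmid) == 0 && rmid == i);

    /* One more domain than RMIDs: no leak, just the default RMID. */
    assert(alloc_rmid(MAX_RMID + 1, &rmid) == -EUSERS && rmid == 0);
    printf("RMID 0 reserved; exhaustion returns -EUSERS\n");
    return 0;
}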
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 4ff89f0..006fc19 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -313,6 +313,8 @@ struct arch_domain
spinlock_t e820_lock;
struct e820entry *e820;
unsigned int nr_e820;
+
+ unsigned int pqos_cqm_rmid; /* CQM RMID assigned to the domain */
} __cacheline_aligned;
#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
index 9baa621..01fa7f2 100644
--- a/xen/include/asm-x86/pqos.h
+++ b/xen/include/asm-x86/pqos.h
@@ -16,6 +16,7 @@
*/
#ifndef ASM_PQOS_H
#define ASM_PQOS_H
+#include <xen/sched.h>
#include <public/xen.h>
#include <xen/spinlock.h>
@@ -41,7 +42,15 @@ struct pqos_cqm {
};
extern struct pqos_cqm *cqm;
+static inline bool_t system_supports_cqm(void)
+{
+ return !!cqm;
+}
+
void __init init_platform_qos(void);
void __init init_cqm(unsigned int rmid_max, unsigned long rmid_mask);
+int alloc_cqm_rmid(struct domain *d);
+void free_cqm_rmid(struct domain *d);
+
#endif
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index f22fe2e..93435ce 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -896,6 +896,14 @@ struct xen_domctl_cacheflush {
typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t);
+struct xen_domctl_qos_type {
+#define _XEN_DOMCTL_pqos_cqm 0
+#define XEN_DOMCTL_pqos_cqm (1U<<_XEN_DOMCTL_pqos_cqm)
+ uint64_t flags;
+};
+typedef struct xen_domctl_qos_type xen_domctl_qos_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_qos_type_t);
+
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -966,6 +974,8 @@ struct xen_domctl {
#define XEN_DOMCTL_getnodeaffinity 69
#define XEN_DOMCTL_set_max_evtchn 70
#define XEN_DOMCTL_cacheflush 71
+#define XEN_DOMCTL_attach_pqos 72
+#define XEN_DOMCTL_detach_pqos 73
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -1027,6 +1037,7 @@ struct xen_domctl {
struct xen_domctl_cacheflush cacheflush;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
+ struct xen_domctl_qos_type qos_type;
uint8_t pad[128];
} u;
};
--
1.7.9.5