Re: [Xen-devel] [PATCH v5 07/13] x86: dynamically get/set CBM for a domain
On 17/04/15 15:33, Chao Peng wrote:
> For CAT, COS is maintained in the hypervisor only, while CBM is exposed
> to user space directly to allow getting/setting a domain's cache
> capacity. For each specified CBM, the hypervisor will either reuse an
> existing COS that has the same CBM or allocate a new one if no matching
> CBM is found. If the allocation fails because no COS is available, an
> error is returned. Getting/setting always operates on a specified
> socket; on a multi-socket system the interface may be called several
> times.
>
> Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
> ---
> Changes in v5:
> * Add spin_lock to protect cbm_map.
> ---
> xen/arch/x86/domctl.c | 20 ++++++
> xen/arch/x86/psr.c | 139 +++++++++++++++++++++++++++++++++++++++-
> xen/include/asm-x86/msr-index.h | 1 +
> xen/include/asm-x86/psr.h | 2 +
> xen/include/public/domctl.h | 12 ++++
> 5 files changed, 172 insertions(+), 2 deletions(-)
>
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 9450795..7ffa650 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -1334,6 +1334,26 @@ long arch_do_domctl(
> }
> break;
>
> + case XEN_DOMCTL_psr_cat_op:
> + switch ( domctl->u.psr_cat_op.cmd )
> + {
> + case XEN_DOMCTL_PSR_CAT_OP_SET_L3_CBM:
> + ret = psr_set_l3_cbm(d, domctl->u.psr_cat_op.target,
> + domctl->u.psr_cat_op.data);
> + break;
> +
> + case XEN_DOMCTL_PSR_CAT_OP_GET_L3_CBM:
> + ret = psr_get_l3_cbm(d, domctl->u.psr_cat_op.target,
> + &domctl->u.psr_cat_op.data);
> + copyback = 1;
> + break;
> +
> + default:
> + ret = -EOPNOTSUPP;
> + break;
> + }
> + break;
> +
> default:
> ret = iommu_do_domctl(domctl, d, u_domctl);
> break;
> diff --git a/xen/arch/x86/psr.c b/xen/arch/x86/psr.c
> index d784efb..2b08269 100644
> --- a/xen/arch/x86/psr.c
> +++ b/xen/arch/x86/psr.c
> @@ -32,6 +32,7 @@ struct psr_cat_socket_info {
> unsigned int cbm_len;
> unsigned int cos_max;
> struct psr_cat_cbm *cos_to_cbm;
> + spinlock_t cbm_lock;
> };
>
> struct psr_assoc {
> @@ -47,6 +48,14 @@ static unsigned int opt_cos_max = 255;
> static uint64_t rmid_mask;
> static DEFINE_PER_CPU(struct psr_assoc, psr_assoc);
>
> +static unsigned int get_socket_cpu(unsigned int socket)
> +{
> + if ( socket < nr_sockets )
> + return cpumask_any(&socket_to_cpumask[socket]);
> +
> + return nr_cpu_ids;
> +}
> +
> static void __init parse_psr_bool(char *s, char *value, char *feature,
> unsigned int mask)
> {
> @@ -246,24 +255,148 @@ int psr_get_cat_l3_info(unsigned int socket, uint32_t *cbm_len,
> return 0;
> }
>
> +int psr_get_l3_cbm(struct domain *d, unsigned int socket, uint64_t *cbm)
> +{
> + unsigned int cos;
> + struct psr_cat_socket_info *info;
> + int ret = get_cat_socket_info(socket, &info);
> +
> + if ( ret )
> + return ret;
> +
> + cos = d->arch.psr_cos_ids[socket];
> + *cbm = info->cos_to_cbm[cos].cbm;
> + return 0;
> +}
> +
> +static bool_t psr_check_cbm(unsigned int cbm_len, uint64_t cbm)
> +{
> + unsigned int first_bit, zero_bit;
> +
> + /* Set bits should only be in the range of [0, cbm_len). */
> + if ( cbm & (~0ull << cbm_len) )
> + return 0;
> +
> + first_bit = find_first_bit(&cbm, cbm_len);
> + zero_bit = find_next_zero_bit(&cbm, cbm_len, first_bit);
> +
> + /* Set bits should be contiguous. */
> + if ( zero_bit < cbm_len &&
> + find_next_bit(&cbm, cbm_len, zero_bit) < cbm_len )
> + return 0;
> +
> + return 1;
> +}
> +
> +struct cos_cbm_info
> +{
> + unsigned int cos;
> + uint64_t cbm;
> +};
> +
> +static void do_write_l3_cbm(void *data)
> +{
> + struct cos_cbm_info *info = data;
> +
> + wrmsrl(MSR_IA32_PSR_L3_MASK(info->cos), info->cbm);
> +}
> +
> +static int write_l3_cbm(unsigned int socket, unsigned int cos, uint64_t cbm)
> +{
> + struct cos_cbm_info info = { .cos = cos, .cbm = cbm };
> +
> + if ( socket == cpu_to_socket(smp_processor_id()) )
> + do_write_l3_cbm(&info);
> + else
> + {
> + unsigned int cpu = get_socket_cpu(socket);
> +
> + if ( cpu >= nr_cpu_ids )
> + return -EBADSLT;
> + on_selected_cpus(cpumask_of(cpu), do_write_l3_cbm, &info, 1);
> + }
> +
> + return 0;
> +}
> +
> +int psr_set_l3_cbm(struct domain *d, unsigned int socket, uint64_t cbm)
> +{
> + unsigned int old_cos, cos;
> + struct psr_cat_cbm *map, *find;
> + struct psr_cat_socket_info *info;
> + int ret = get_cat_socket_info(socket, &info);
> +
> + if ( ret )
> + return ret;
> +
> + if ( !psr_check_cbm(info->cbm_len, cbm) )
> + return -EINVAL;
> +
> + old_cos = d->arch.psr_cos_ids[socket];
> + map = info->cos_to_cbm;
> + find = NULL;
> +
> + for ( cos = 0; cos <= info->cos_max; cos++ )
> + {
> + /* If still not found, then keep an unused one. */
> + if ( !find && cos != 0 && map[cos].ref == 0 )
> + find = map + cos;
> + else if ( map[cos].cbm == cbm )
> + {
> + if ( unlikely(cos == old_cos) )
> + return 0;
> + find = map + cos;
> + break;
> + }
> + }
> +
> + /* If the old COS is referenced only by this domain, then use it. */
> + if ( !find && map[old_cos].ref == 1 )
> + find = map + old_cos;
> +
> + if ( !find )
> + return -EUSERS;
> +
> + cos = find - map;
> + if ( find->cbm != cbm )
> + {
> + ret = write_l3_cbm(socket, cos, cbm);
> + if ( ret )
> + return ret;
> + find->cbm = cbm;
> + }
> +
> + spin_lock(&info->cbm_lock);
> + find->ref++;
> + map[old_cos].ref--;
> + spin_unlock(&info->cbm_lock);
The spinlock must cover read accesses as well, or old_cos is liable to
be stale by this point.
It might be better to switch to an rwlock, as the data is read often but
modified very rarely.
~Andrew
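
For illustration, a minimal sketch of what the rwlock variant could look
like on the read path (assuming Xen's rwlock_t with rwlock_init() in
cat_cpu_init() and read_lock()/read_unlock(); names as in this patch):

int psr_get_l3_cbm(struct domain *d, unsigned int socket, uint64_t *cbm)
{
    struct psr_cat_socket_info *info;
    int ret = get_cat_socket_info(socket, &info);

    if ( ret )
        return ret;

    /* Read side: protects psr_cos_ids[socket] against a concurrent setter. */
    read_lock(&info->cbm_lock);
    *cbm = info->cos_to_cbm[d->arch.psr_cos_ids[socket]].cbm;
    read_unlock(&info->cbm_lock);

    return 0;
}

In psr_set_l3_cbm(), the COS search, the ref updates and the write to
d->arch.psr_cos_ids[socket] would then all sit inside a single
write_lock()/write_unlock() section, so old_cos cannot go stale between
being read and being dereferenced.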
> +
> + d->arch.psr_cos_ids[socket] = cos;
> + return 0;
> +}
> +
> /* Called with domain lock held, no psr specific lock needed */
> static void psr_free_cos(struct domain *d)
> {
> unsigned int socket;
> unsigned int cos;
> + struct psr_cat_socket_info *info;
>
> if( !d->arch.psr_cos_ids )
> return;
>
> for ( socket = 0; socket < nr_sockets; socket++ )
> {
> - if ( !cat_socket_info[socket].enabled )
> + info = cat_socket_info + socket;
> + if ( !info->enabled )
> continue;
>
> if ( (cos = d->arch.psr_cos_ids[socket]) == 0 )
> continue;
>
> - cat_socket_info[socket].cos_to_cbm[cos].ref--;
> + spin_lock(&info->cbm_lock);
> + info->cos_to_cbm[cos].ref--;
> + spin_unlock(&info->cbm_lock);
> }
>
> xfree(d->arch.psr_cos_ids);
> @@ -323,6 +456,8 @@ static void cat_cpu_init(void)
> /* cos=0 is reserved as default cbm(all ones). */
> info->cos_to_cbm[0].cbm = (1ull << info->cbm_len) - 1;
>
> + spin_lock_init(&info->cbm_lock);
> +
> info->enabled = 1;
> printk(XENLOG_INFO "CAT: enabled on socket %u, cos_max:%u, cbm_len:%u\n",
> socket, info->cos_max, info->cbm_len);
> diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
> index 83f2f70..5425f77 100644
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -327,6 +327,7 @@
> #define MSR_IA32_CMT_EVTSEL 0x00000c8d
> #define MSR_IA32_CMT_CTR 0x00000c8e
> #define MSR_IA32_PSR_ASSOC 0x00000c8f
> +#define MSR_IA32_PSR_L3_MASK(n) (0x00000c90 + (n))
>
> /* Intel Model 6 */
> #define MSR_P6_PERFCTR(n) (0x000000c1 + (n))
> diff --git a/xen/include/asm-x86/psr.h b/xen/include/asm-x86/psr.h
> index 3a8a406..fb474bb 100644
> --- a/xen/include/asm-x86/psr.h
> +++ b/xen/include/asm-x86/psr.h
> @@ -54,6 +54,8 @@ void psr_ctxt_switch_to(struct domain *d);
>
> int psr_get_cat_l3_info(unsigned int socket, uint32_t *cbm_len,
> uint32_t *cos_max);
> +int psr_get_l3_cbm(struct domain *d, unsigned int socket, uint64_t *cbm);
> +int psr_set_l3_cbm(struct domain *d, unsigned int socket, uint64_t cbm);
>
> int psr_domain_init(struct domain *d);
> void psr_domain_free(struct domain *d);
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index d2e8db0..9337bb6 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -1001,6 +1001,16 @@ struct xen_domctl_psr_cmt_op {
> typedef struct xen_domctl_psr_cmt_op xen_domctl_psr_cmt_op_t;
> DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
>
> +struct xen_domctl_psr_cat_op {
> +#define XEN_DOMCTL_PSR_CAT_OP_SET_L3_CBM 0
> +#define XEN_DOMCTL_PSR_CAT_OP_GET_L3_CBM 1
> + uint32_t cmd; /* IN: XEN_DOMCTL_PSR_CAT_OP_* */
> + uint32_t target; /* IN: socket to be operated on */
> + uint64_t data; /* IN/OUT */
> +};
> +typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
> +
> struct xen_domctl {
> uint32_t cmd;
> #define XEN_DOMCTL_createdomain 1
> @@ -1075,6 +1085,7 @@ struct xen_domctl {
> #define XEN_DOMCTL_set_vcpu_msrs 73
> #define XEN_DOMCTL_setvnumainfo 74
> #define XEN_DOMCTL_psr_cmt_op 75
> +#define XEN_DOMCTL_psr_cat_op 76
> #define XEN_DOMCTL_gdbsx_guestmemio 1000
> #define XEN_DOMCTL_gdbsx_pausevcpu 1001
> #define XEN_DOMCTL_gdbsx_unpausevcpu 1002
> @@ -1137,6 +1148,7 @@ struct xen_domctl {
> struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
> struct xen_domctl_vnuma vnuma;
> struct xen_domctl_psr_cmt_op psr_cmt_op;
> + struct xen_domctl_psr_cat_op psr_cat_op;
> uint8_t pad[128];
> } u;
> };
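
For reference, a minimal caller-side sketch of the per-socket interface
described in the commit message. Only the fields defined in this patch
are assumed; issue_domctl() is a hypothetical stand-in for whatever
domctl hypercall helper the toolstack uses (and is assumed to fill in
interface_version):

static int set_domain_l3_cbm(domid_t domid, unsigned int socket, uint64_t cbm)
{
    struct xen_domctl domctl = { };

    domctl.cmd = XEN_DOMCTL_psr_cat_op;
    domctl.domain = domid;
    domctl.u.psr_cat_op.cmd = XEN_DOMCTL_PSR_CAT_OP_SET_L3_CBM;
    domctl.u.psr_cat_op.target = socket;  /* socket to operate on */
    domctl.u.psr_cat_op.data = cbm;       /* cache bit mask to apply */

    return issue_domctl(&domctl);         /* hypothetical hypercall wrapper */
}

On a multi-socket system the same call would be repeated once per socket,
matching the per-socket semantics described above.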