[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v8 10/19] x86/VPMU: Interface for setting PMU mode and flags



>>> On 01.07.14 at 16:37, <boris.ostrovsky@xxxxxxxxxx> wrote:
> @@ -274,3 +290,151 @@ void vpmu_dump(struct vcpu *v)
>          vpmu->arch_vpmu_ops->arch_vpmu_dump(v);
>  }
>  
> +static atomic_t vpmu_sched_counter;
> +static void vpmu_sched_checkin(unsigned long unused)

Blank line please between those two.

> +static int
> +vpmu_force_context_switch(XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg)
> +{
> +    unsigned i, j, allbutself_num;
> +    cpumask_t allbutself;
> +    s_time_t start;
> +    static struct tasklet *sync_task = NULL;

Pointless initializer.

> +    int ret = 0;
> +
> +    allbutself_num = num_online_cpus() - 1;
> +
> +    if ( sync_task ) // if true, we are in hypercall continuation

C++ style comment.

> +        goto cont_wait;
> +
> +    cpumask_andnot(&allbutself, &cpu_online_map,
> +                   cpumask_of(smp_processor_id()));
> +
> +    sync_task = xmalloc_array(struct tasklet, allbutself_num);
> +    if ( !sync_task )
> +    {
> +        printk("vpmu_unload_all: out of memory\n");

Neither this function nor the one it's a helper for is named that way.

> +        return -ENOMEM;
> +    }
> +
> +    for ( i = 0; i < allbutself_num; i++ )
> +        tasklet_init(&sync_task[i], vpmu_sched_checkin, 0);
> +
> +    atomic_set(&vpmu_sched_counter, 0);
> +
> +    j = 0;
> +    for_each_cpu ( i, &allbutself )
> +        tasklet_schedule_on_cpu(&sync_task[j++], i);
> +
> +    vpmu_save(current);
> +
> +cont_wait:

Labels (if needed at all) should be indented by at least one space.

> +    start = NOW();
> +    /*
> +     * Note that we may fail here if a CPU is hot-unplugged while we are
> +     * waiting. We will then time out.
> +     */
> +    while ( atomic_read(&vpmu_sched_counter) != allbutself_num )
> +    {
> +        /* Give up after 5 seconds */
> +        if ( NOW() > start + SECONDS(5) )
> +        {
> +            printk("vpmu_unload_all: failed to sync\n");
> +            ret = -EBUSY;
> +            break;
> +        }
> +        cpu_relax();
> +        if ( hypercall_preempt_check() )
> +            return hypercall_create_continuation(
> +                __HYPERVISOR_xenpmu_op, "ih", XENPMU_mode_set, arg);
> +    }

I wonder whether this is race free (with respect to another CPU doing
something similar), and how you expect the 5s timeout above to ever be
reached (you're virtually guaranteed to be asked to preempt earlier).

> +
> +    for ( i = 0; i < allbutself_num; i++ )
> +        tasklet_kill(&sync_task[i]);
> +    xfree(sync_task);
> +    sync_task = NULL;
> +
> +    return ret;
> +}
> +
> +long do_xenpmu_op(int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg)
> +{
> +    int ret = -EINVAL;
> +    xen_pmu_params_t pmu_params;
> +
> +    switch ( op )
> +    {
> +    case XENPMU_mode_set:
> +    {
> +        static DEFINE_SPINLOCK(xenpmu_mode_lock);
> +        uint32_t current_mode;
> +
> +        if ( !is_control_domain(current->domain) )
> +            return -EPERM;
> +
> +        if ( copy_from_guest(&pmu_params, arg, 1) )
> +            return -EFAULT;
> +
> +        if ( pmu_params.val & ~XENPMU_MODE_ON )
> +            return -EINVAL;
> +
> +        if ( !spin_trylock(&xenpmu_mode_lock) )
> +            return -EAGAIN;

Wouldn't it be better for this to also set a continuation, rather than
having the caller do the retry?

> +
> +        current_mode = vpmu_mode;
> +        vpmu_mode = pmu_params.val;
> +
> +        if ( vpmu_mode == XENPMU_MODE_OFF )
> +        {
> +            /*
> +             * Make sure all (non-dom0) VCPUs have unloaded their VPMUs. This
> +             * can be achieved by having all physical processors go through
> +             * context_switch().
> +             */
> +            ret = vpmu_force_context_switch(arg);
> +            if ( ret )
> +                vpmu_mode = current_mode;
> +        }
> +        else
> +            ret = 0;
> +
> +        spin_unlock(&xenpmu_mode_lock);
> +        break;
> +    }
> +    case XENPMU_mode_get:

Blank line please before next case (just like properly done further
down).

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.