On 25.09.14 at 21:28, <boris.ostrovsky@xxxxxxxxxx> wrote:
+static int vpmu_force_context_switch(void)
+{
+ unsigned i, j, allbutself_num, mycpu;
+ static s_time_t start, now;
I don't think "now" needs to be static. In fact it would perhaps be
better moved into the narrower scope below.
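Something along these lines (just a sketch) is what I have in mind:

    while ( atomic_read(&vpmu_sched_counter) != allbutself_num )
    {
        s_time_t now;

        cpu_relax();
        now = NOW();
        ...
    }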
+ struct tasklet **sync_task;
+ struct vcpu *curr_vcpu = current;
+ int ret = 0;
+
+ allbutself_num = num_online_cpus() - 1;
+
+ sync_task = xzalloc_array(struct tasklet *, allbutself_num);
So while the allocation further down got broken up properly, this
still degenerates to an order-greater-0 allocation beyond 512
CPUs. I think you'd be better off (ab)using per-CPU data here.
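Something like this (a sketch only, with the variable name made up)
would avoid the runtime allocation altogether:

    static DEFINE_PER_CPU(struct tasklet, sync_task);

    for_each_online_cpu ( i )
    {
        if ( i == mycpu )
            continue;
        tasklet_init(&per_cpu(sync_task, i), vpmu_sched_checkin, 0);
        tasklet_schedule_on_cpu(&per_cpu(sync_task, i), i);
    }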
+ if ( !sync_task )
+ {
+ printk(XENLOG_WARNING "vpmu_force_context_switch: out of memory\n");
+ return -ENOMEM;
+ }
+
+ for ( i = 0; i < allbutself_num; i++ )
+ {
+ sync_task[i] = xmalloc(struct tasklet);
+ if ( sync_task[i] == NULL )
+ {
+ printk(XENLOG_WARNING "vpmu_force_context_switch: out of memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ tasklet_init(sync_task[i], vpmu_sched_checkin, 0);
+ }
+
+ atomic_set(&vpmu_sched_counter, 0);
+
+ j = 0;
+ mycpu = smp_processor_id();
+ for_each_online_cpu( i )
+ {
+ if ( i != mycpu )
+ tasklet_schedule_on_cpu(sync_task[j++], i);
+ }
+
+ vpmu_save(curr_vcpu);
+
+ start = NOW();
+
+ /*
+ * Note that we may fail here if a CPU is hot-plugged while we are
+ * waiting. We will then time out.
+ */
+ while ( atomic_read(&vpmu_sched_counter) != allbutself_num )
+ {
+ cpu_relax();
+
+ now = NOW();
+
+ /* Give up after 5 seconds */
+ if ( now > start + SECONDS(5) )
+ {
+ printk(XENLOG_WARNING
+ "vpmu_force_context_switch: failed to sync\n");
+ ret = -EBUSY;
+ break;
+ }
+
+ /* Or after 2 milliseconds if need to be preempted */
Please make clear that the 2 ms is an arbitrary value, e.g.:
/* Or after 2 ms (arbitrary value) if need to be preempted. */
+ if ( (now > start + MILLISECS(2)) && hypercall_preempt_check() )
+ {
+ ret = -EAGAIN;
+ break;
+ }
And then this won't do what (I hope) you want on any continuation:
"start" doesn't get updated again in that case.