Re: [Xen-devel] [PATCH v8 14/16] x86/microcode: Synchronize late microcode loading
On 01.08.2019 12:22, Chao Gao wrote:
> @@ -283,37 +345,105 @@ static int microcode_update_cpu(const struct microcode_patch *patch)
>      return err;
>  }
>
> -static long do_microcode_update(void *patch)
> +static int do_microcode_update(void *patch)
>  {
> -    unsigned int cpu;
> +    unsigned int cpu = smp_processor_id();
> +    unsigned int cpu_nr = num_online_cpus();
> +    int ret;
>
> -    /* store the patch after a successful loading */
> -    if ( !microcode_update_cpu(patch) && patch )
> +    /* Mark loading an ucode is in progress */
> +    cmpxchg(&loading_state, LOADING_EXITED, LOADING_ENTERED);
> +    cpumask_set_cpu(cpu, &cpu_callin_map);
> +    ret = wait_for_condition(wait_cpu_callin, (void *)(unsigned long)cpu_nr,
> +                             MICROCODE_CALLIN_TIMEOUT_US);

One more minor request - just like you do here, ...

> +    if ( ret )
>      {
> -        spin_lock(&microcode_mutex);
> -        microcode_update_cache(patch);
> -        spin_unlock(&microcode_mutex);
> -        patch = NULL;
> +        cmpxchg(&loading_state, LOADING_ENTERED, LOADING_ABORTED);
> +        return ret;
>      }
>
> -    if ( microcode_ops->end_update )
> -        microcode_ops->end_update();
> +    /*
> +     * Load microcode update on only one logical processor per core, or in
> +     * AMD's term, one core per compute unit. The one with the lowest thread
> +     * id among all siblings is chosen to perform the loading.
> +     */
> +    if ( (cpu == cpumask_first(per_cpu(cpu_sibling_mask, cpu))) )
> +    {
> +        static unsigned int panicked = 0;
> +        bool monitor;
> +        unsigned int done;
> +        unsigned long tick = 0;
>
> -    cpu = cpumask_next(smp_processor_id(), &cpu_online_map);
> -    if ( cpu < nr_cpu_ids )
> -        return continue_hypercall_on_cpu(cpu, do_microcode_update, patch);
> +        ret = microcode_ops->apply_microcode(patch);
> +        if ( !ret )
> +        {
> +            unsigned int cpu2;
>
> -    /* Free the patch if no CPU has loaded it successfully. */
> -    if ( patch )
> -        microcode_free_patch(patch);
> +            atomic_inc(&cpu_updated);
> +            /* Propagate revision number to all siblings */
> +            for_each_cpu(cpu2, per_cpu(cpu_sibling_mask, cpu))
> +                per_cpu(cpu_sig, cpu2).rev = this_cpu(cpu_sig).rev;
> +        }
>
> -    return 0;
> +        /*
> +         * The first CPU reaching here will monitor the progress and emit
> +         * warning message if the duration is too long (e.g. >1 second).
> +         */
> +        monitor = !atomic_inc_return(&cpu_out);
> +        if ( monitor )
> +            tick = rdtsc_ordered();
> +
> +        /* Waiting for all cores or computing units finishing update */
> +        done = atomic_read(&cpu_out);
> +        while ( !panicked && done != nr_cores )
> +        {
> +            /*
> +             * During each timeout interval, at least a CPU is expected to
> +             * finish its update. Otherwise, something goes wrong.
> +             *
> +             * Note that RDTSC (in wait_for_condition()) is safe for threads to
> +             * execute while waiting for completion of loading an update.
> +             */
> +            if ( wait_for_condition(&wait_cpu_callout,
> +                                    (void *)(unsigned long)(done + 1),
> +                                    MICROCODE_UPDATE_TIMEOUT_US) &&

... could you please omit the redundant & on the first argument here?

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
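The remark about the redundant & rests on a basic C rule: a function designator used as an argument decays to a pointer to that function, so wait_for_condition(wait_cpu_callin, ...) and wait_for_condition(&wait_cpu_callout, ...) pass the same kind of value, and dropping the & is purely cosmetic. The standalone sketch below is not Xen code; the callback, its behaviour, and the two-argument wait_for_condition() stand-in are invented for illustration.

#include <stdio.h>

/* Hypothetical condition callback with the same shape as the patch's
 * wait_cpu_callin/wait_cpu_callout: takes an opaque argument and returns
 * non-zero once the condition is met. */
static int enough_cpus(void *data)
{
    return (unsigned long)data >= 4;
}

/* Simplified stand-in for the patch's wait_for_condition(): evaluates the
 * callback once instead of polling with a timeout. */
static int wait_for_condition(int (*func)(void *), void *data)
{
    return func(data) ? 0 : -1;
}

int main(void)
{
    /* A function name decays to a function pointer, so these two calls are
     * equivalent; the explicit & on the second adds nothing. */
    int without_amp = wait_for_condition(enough_cpus, (void *)5UL);
    int with_amp = wait_for_condition(&enough_cpus, (void *)5UL);

    printf("%d %d\n", without_amp, with_amp); /* prints "0 0" */
    return 0;
}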