|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v4 2/6] microcode: save all microcodes which pass sanity check
... and search caches to find a suitable one when loading.
With this cache, the existing 'uci->mc' structure is redundant.
I deliberately avoid touching 'uci->mc' as I am going to remove
it completely in the next patch.
Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
---
xen/arch/x86/microcode.c | 2 +
xen/arch/x86/microcode_amd.c | 93 +++++++++++++++++++++++++++++++++++---
xen/arch/x86/microcode_intel.c | 99 ++++++++++++++++++++++++++++++++++++++---
xen/include/asm-x86/microcode.h | 11 +++++
4 files changed, 193 insertions(+), 12 deletions(-)
diff --git a/xen/arch/x86/microcode.c b/xen/arch/x86/microcode.c
index 4163f50..4f2db88 100644
--- a/xen/arch/x86/microcode.c
+++ b/xen/arch/x86/microcode.c
@@ -61,6 +61,8 @@ static struct ucode_mod_blob __initdata ucode_blob;
*/
static bool_t __initdata ucode_scan;
+LIST_HEAD(microcode_cache);
+
void __init microcode_set_module(unsigned int idx)
{
ucode_mod_idx = idx;
diff --git a/xen/arch/x86/microcode_amd.c b/xen/arch/x86/microcode_amd.c
index fba44cc..a686a87 100644
--- a/xen/arch/x86/microcode_amd.c
+++ b/xen/arch/x86/microcode_amd.c
@@ -190,22 +190,90 @@ static bool_t microcode_fits(const struct microcode_amd
*mc_amd,
return 1;
}
+/*
+ * Deep-copy @mc_amd into a freshly allocated cache entry.
+ * Returns the new ucode_patch, or ERR_PTR(-ENOMEM) on allocation failure.
+ * Ownership of all buffers transfers to the caller (freed via
+ * free_ucode_patch()).
+ */
+static struct ucode_patch *alloc_ucode_patch(struct microcode_amd *mc_amd)
+{
+    struct ucode_patch *ucode_patch = xmalloc(struct ucode_patch);
+    struct microcode_amd *cache = xmalloc(struct microcode_amd);
+    void *mpb = xmalloc_bytes(mc_amd->mpb_size);
+    struct equiv_cpu_entry *equiv_cpu_table =
+        xmalloc_bytes(mc_amd->equiv_cpu_table_size);
+
+    if ( !ucode_patch || !cache || !mpb || !equiv_cpu_table )
+    {
+        /* Don't leak whichever allocations did succeed (xfree(NULL) is ok). */
+        xfree(equiv_cpu_table);
+        xfree(mpb);
+        xfree(cache);
+        xfree(ucode_patch);
+        return ERR_PTR(-ENOMEM);
+    }
+
+    /*
+     * Hook the new buffers up *before* copying into them: the previous
+     * revision memcpy'd through cache->equiv_cpu_table / cache->mpb while
+     * those fields were still uninitialised.
+     */
+    cache->equiv_cpu_table = equiv_cpu_table;
+    cache->mpb = mpb;
+    memcpy(cache->equiv_cpu_table, mc_amd->equiv_cpu_table,
+           mc_amd->equiv_cpu_table_size);
+    memcpy(cache->mpb, mc_amd->mpb, mc_amd->mpb_size);
+    cache->equiv_cpu_table_size = mc_amd->equiv_cpu_table_size;
+    cache->mpb_size = mc_amd->mpb_size;
+    ucode_patch->data = cache;
+    return ucode_patch;
+}
+
+/* Free a cached AMD microcode patch together with its embedded buffers. */
+static void free_ucode_patch(struct ucode_patch *ucode_patch)
+{
+    struct microcode_amd *mc_amd = ucode_patch->data;
+
+    xfree(mc_amd->equiv_cpu_table);
+    xfree(mc_amd->mpb);
+    xfree(mc_amd);
+    xfree(ucode_patch);
+}
+
+/*
+ * Save a microcode patch to the cache list.
+ * return 1: added successfully
+ *        0: replaced an existing entry
+ *       -1: failed as an equal or newer microcode was already cached
+ */
+static int save_patch(struct ucode_patch *new_patch)
+{
+    struct ucode_patch *ucode_patch;
+    struct microcode_amd *new_mc = new_patch->data;
+    struct microcode_header_amd *new_header = new_mc->mpb;
+
+    list_for_each_entry(ucode_patch, &microcode_cache, list)
+    {
+        struct microcode_amd *old_mc = ucode_patch->data;
+        struct microcode_header_amd *old_header = old_mc->mpb;
+
+        /* At most one entry per processor_rev_id is kept in the cache. */
+        if ( new_header->processor_rev_id == old_header->processor_rev_id )
+        {
+            if ( new_header->patch_id <= old_header->patch_id )
+                return -1;
+            list_replace(&ucode_patch->list, &new_patch->list);
+            free_ucode_patch(ucode_patch);
+            return 0;
+        }
+    }
+    list_add_tail(&new_patch->list, &microcode_cache);
+    return 1;
+}
+
+/*
+ * Return the mpb of the first cached patch that fits @cpu, or NULL if the
+ * cache holds nothing suitable.
+ */
+static struct microcode_header_amd *find_patch(unsigned int cpu)
+{
+    struct ucode_patch *ucode_patch;
+
+    list_for_each_entry(ucode_patch, &microcode_cache, list)
+    {
+        if ( microcode_fits(ucode_patch->data, cpu) )
+            return ((struct microcode_amd *)ucode_patch->data)->mpb;
+    }
+    return NULL;
+}
+
static int apply_microcode(unsigned int cpu)
{
unsigned long flags;
struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
uint32_t rev;
- struct microcode_amd *mc_amd = uci->mc.mc_amd;
struct microcode_header_amd *hdr;
int hw_err;
/* We should bind the task to the CPU */
BUG_ON(raw_smp_processor_id() != cpu);
- if ( mc_amd == NULL )
- return -EINVAL;
-
- hdr = mc_amd->mpb;
+ hdr = find_patch(cpu);
if ( hdr == NULL )
return -EINVAL;
@@ -491,6 +559,21 @@ static int cpu_request_microcode(unsigned int cpu, const
void *buf,
while ( (error = get_ucode_from_buffer_amd(mc_amd, buf, bufsize,
&offset)) == 0 )
{
+ struct ucode_patch *ucode_patch;
+
+ /*
+ * Save this microcode before checking the signature. It is to
+ * optimize microcode update on a mixed family system. Parsing
+ * microcode file is only done once on one of the CPUs, and
+ * during this process microcode cache is created. Other CPUs
+ * needn't parse the same microcode file again and again.
+ * Instead, they just load the matched and latest microcode in
+ * the caches.
+ */
+ ucode_patch = alloc_ucode_patch(mc_amd);
+ if ( !IS_ERR_OR_NULL(ucode_patch) && (save_patch(ucode_patch) < 0) )
+ free_ucode_patch(ucode_patch);
+
if ( microcode_fits(mc_amd, cpu) )
{
error = apply_microcode(cpu);
diff --git a/xen/arch/x86/microcode_intel.c b/xen/arch/x86/microcode_intel.c
index 8d9a3b2..c4f812f 100644
--- a/xen/arch/x86/microcode_intel.c
+++ b/xen/arch/x86/microcode_intel.c
@@ -251,6 +251,42 @@ static int microcode_sanity_check(void *mc)
}
/*
+ * save a microcode to the cache list
+ * return 1: added successfully
+ * 0: replaced an existing entry
+ * -1: failed as a newer microcode was already cached
+ */
+static int save_patch(struct ucode_patch *new_patch)
+{
+    void *mc;
+    struct ucode_patch *ucode_patch;
+
+    ASSERT(new_patch);
+
+    mc = new_patch->data;
+    list_for_each_entry(ucode_patch, &microcode_cache, list)
+    {
+        struct microcode_header_intel *saved_header = ucode_patch->data;
+        int ret;
+
+        ret = microcode_update_match(mc, saved_header->sig, saved_header->pf,
+                                     saved_header->rev);
+        /* An equal or newer revision for this signature is already cached. */
+        if ( ret == OLD_UCODE )
+            return -1;
+        if ( ret == MIS_UCODE )
+            continue;
+
+        /* NEW_UCODE: supersede the cached entry in place. */
+        list_replace(&ucode_patch->list, &new_patch->list);
+        xfree(ucode_patch->data);
+        xfree(ucode_patch);
+        return 0;
+    }
+
+    list_add_tail(&new_patch->list, &microcode_cache);
+    return 1;
+}
+
+/*
* return 0 - no update found
* return 1 - found update
* return < 0 - error
@@ -261,6 +297,30 @@ static int get_matching_microcode(const void *mc, unsigned
int cpu)
const struct microcode_header_intel *mc_header = mc;
unsigned long total_size = get_totalsize(mc_header);
void *new_mc;
+ struct ucode_patch *ucode_patch = xmalloc(struct ucode_patch);
+ void *new_mc2 = xmalloc_bytes(total_size);
+
+ /*
+ * Save this microcode before checking the signature. It is to
+ * optimize microcode update on a mixed family system. Parsing
+ * microcode file is only done once on one of the CPUs, and
+ * during this process microcode cache is created. Other CPUs
+ * needn't parse the same microcode file again and again.
+ * Instead, they just load the matched and latest microcode in
+ * the caches.
+ */
+ if ( !ucode_patch || !new_mc2 )
+ {
+ printk(KERN_ERR "microcode: error! Can not allocate memory\n");
+ return -ENOMEM;
+ }
+ memcpy(new_mc2, mc, total_size);
+ ucode_patch->data = new_mc2;
+ if ( save_patch(ucode_patch) < 0 )
+ {
+ xfree(new_mc2);
+ xfree(ucode_patch);
+ }
if ( microcode_update_match(mc, uci->cpu_sig.sig, uci->cpu_sig.pf,
uci->cpu_sig.rev) != NEW_UCODE )
@@ -282,6 +342,29 @@ static int get_matching_microcode(const void *mc, unsigned
int cpu)
return 1;
}
+/*
+ * Refresh @cpu's signature and return the first cached microcode that is
+ * newer than the running revision; NULL if none, ERR_PTR() on failure to
+ * collect the CPU signature.
+ */
+static struct microcode_intel *find_patch(unsigned int cpu)
+{
+    int err;
+    struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
+    struct ucode_patch *ucode_patch;
+
+    err = collect_cpu_info(cpu, &uci->cpu_sig);
+    if ( unlikely(err) )
+    {
+        memset(uci, 0, sizeof(*uci));
+        return ERR_PTR(err);
+    }
+
+    list_for_each_entry(ucode_patch, &microcode_cache, list)
+    {
+        int ret = microcode_update_match(ucode_patch->data, uci->cpu_sig.sig,
+                                         uci->cpu_sig.pf, uci->cpu_sig.rev);
+
+        if ( ret == NEW_UCODE )
+            return ucode_patch->data;
+    }
+    return NULL;
+}
+
static int apply_microcode(unsigned int cpu)
{
unsigned long flags;
@@ -289,18 +372,20 @@ static int apply_microcode(unsigned int cpu)
unsigned int val[2];
unsigned int cpu_num = raw_smp_processor_id();
struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu_num);
+ struct microcode_intel *mc_intel;
/* We should bind the task to the CPU */
BUG_ON(cpu_num != cpu);
- if ( uci->mc.mc_intel == NULL )
+ mc_intel = find_patch(cpu);
+ if ( mc_intel == NULL )
return -EINVAL;
/* serialize access to the physical write to MSR 0x79 */
spin_lock_irqsave(&microcode_update_lock, flags);
/* write microcode via MSR 0x79 */
- wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)uci->mc.mc_intel->bits);
+ wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc_intel->bits);
wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
/* As documented in the SDM: Do a CPUID 1 here */
@@ -311,19 +396,19 @@ static int apply_microcode(unsigned int cpu)
val[1] = (uint32_t)(msr_content >> 32);
spin_unlock_irqrestore(&microcode_update_lock, flags);
- if ( val[1] != uci->mc.mc_intel->hdr.rev )
+ if ( val[1] != mc_intel->hdr.rev )
{
printk(KERN_ERR "microcode: CPU%d update from revision "
"%#x to %#x failed. Resulting revision is %#x.\n", cpu_num,
- uci->cpu_sig.rev, uci->mc.mc_intel->hdr.rev, val[1]);
+ uci->cpu_sig.rev, mc_intel->hdr.rev, val[1]);
return -EIO;
}
printk(KERN_INFO "microcode: CPU%d updated from revision "
"%#x to %#x, date = %04x-%02x-%02x \n",
cpu_num, uci->cpu_sig.rev, val[1],
- uci->mc.mc_intel->hdr.date & 0xffff,
- uci->mc.mc_intel->hdr.date >> 24,
- (uci->mc.mc_intel->hdr.date >> 16) & 0xff);
+ mc_intel->hdr.date & 0xffff,
+ mc_intel->hdr.date >> 24,
+ (mc_intel->hdr.date >> 16) & 0xff);
uci->cpu_sig.rev = val[1];
return 0;
diff --git a/xen/include/asm-x86/microcode.h b/xen/include/asm-x86/microcode.h
index 23ea954..0236425 100644
--- a/xen/include/asm-x86/microcode.h
+++ b/xen/include/asm-x86/microcode.h
@@ -1,6 +1,7 @@
#ifndef ASM_X86__MICROCODE_H
#define ASM_X86__MICROCODE_H
+#include <xen/list.h>
#include <xen/percpu.h>
struct cpu_signature;
@@ -30,7 +31,17 @@ struct ucode_cpu_info {
} mc;
};
+/* A cached microcode image, linked on the global microcode_cache list. */
+struct ucode_patch {
+    struct list_head list;
+    void *data;                   /* vendor-specific microcode blob */
+    uint32_t patch_id;
+    uint16_t equiv_cpu;
+    void *equiv_cpu_table;        /* AMD only; NULL for Intel entries */
+    size_t equiv_cpu_table_size;
+};
+
DECLARE_PER_CPU(struct ucode_cpu_info, ucode_cpu_info);
extern const struct microcode_ops *microcode_ops;
+extern struct list_head microcode_cache;
#endif /* ASM_X86__MICROCODE_H */
--
1.8.3.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |