
[Xen-devel] [PATCH V2] x86, amd_ucode: Verify max allowed patch size before apply



Each processor family has a stipulated maximum patch size. Use this as an
additional sanity check before applying a patch.

In situations where the 'patch level' of the last patch in the fw image
that 'fits' the current processor is at or below the installed level, i.e.:

 if ( mc_header->patch_id <= uci->cpu_sig.rev )

then we can simply flush out ucode_blob or NULL-ify ucode_mod_map. This
saves a bit of boot time when the routines are invoked during
'microcode_init', as we don't have to update anything.

To this end, this patch returns -EEXIST when the above situation occurs.
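
For illustration only (not part of the patch), a minimal caller-side sketch
of the idea: treat -EEXIST as "nothing newer to apply" and free the cached
firmware image so no work is left for microcode_init(). The names
ucode_cache, free_ucode_cache() and early_microcode_update() below are
hypothetical stand-ins for the ucode_blob / ucode_mod_map handling in
xen/arch/x86/microcode.c:

    #include <errno.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the cached ucode image kept across boot. */
    struct ucode_cache {
        void   *data;
        size_t  size;
    };

    static void free_ucode_cache(struct ucode_cache *c)
    {
        free(c->data);
        c->data = NULL;
        c->size = 0;
    }

    /*
     * rc is what cpu_request_microcode() reported for the boot CPU.
     * -EEXIST means the installed patch level is already >= every
     * matching patch in the image, so the cache can be dropped early.
     */
    static int early_microcode_update(struct ucode_cache *c, int rc)
    {
        if ( rc == -EEXIST )
        {
            free_ucode_cache(c);
            return 0;
        }
        return rc;
    }

    int main(void)
    {
        struct ucode_cache cache = { malloc(2048), 2048 };

        /* Pretend the request reported "already at required level". */
        return early_microcode_update(&cache, -EEXIST);
    }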

While at it, fix the comment at the very top to indicate that we support
ucode patch loading for fam10h and later.

Changes from V1:
 - Modified code per Andrew's and Jan's review comments.
 - Reworked the error handling around 'microcode_fits' so that the error
   value it returns is propagated correctly to the caller.

Signed-off-by: Aravind Gopalakrishnan <aravind.gopalakrishnan@xxxxxxx>
Reviewed-by: Tom Lendacky <Thomas.Lendacky@xxxxxxx>
Reviewed-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@xxxxxxx>
---
 xen/arch/x86/microcode_amd.c |   45 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 41 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/microcode_amd.c b/xen/arch/x86/microcode_amd.c
index b227173..49fbfda 100644
--- a/xen/arch/x86/microcode_amd.c
+++ b/xen/arch/x86/microcode_amd.c
@@ -8,7 +8,7 @@
  *  Tigran Aivazian <tigran@xxxxxxxxxxxxxxxxxxxx>
  *
  *  This driver allows to upgrade microcode on AMD
- *  family 0x10 and 0x11 processors.
+ *  family 0x10 and later.
  *
  *  Licensed unter the terms of the GNU General Public
  *  License version 2. See file COPYING for details.
@@ -94,7 +94,35 @@ static int collect_cpu_info(int cpu, struct cpu_signature *csig)
     return 0;
 }
 
-static bool_t microcode_fits(const struct microcode_amd *mc_amd, int cpu)
+static bool_t verify_patch_size(uint32_t patch_size)
+{
+    uint32_t max_size;
+
+#define F1XH_MPB_MAX_SIZE 2048
+#define F14H_MPB_MAX_SIZE 1824
+#define F15H_MPB_MAX_SIZE 4096
+#define F16H_MPB_MAX_SIZE 3458
+
+    switch (boot_cpu_data.x86)
+    {
+    case 0x14:
+        max_size = F14H_MPB_MAX_SIZE;
+        break;
+    case 0x15:
+        max_size = F15H_MPB_MAX_SIZE;
+        break;
+    case 0x16:
+        max_size = F16H_MPB_MAX_SIZE;
+        break;
+    default:
+        max_size = F1XH_MPB_MAX_SIZE;
+        break;
+    }
+
+    return (patch_size <= max_size);
+}
+
+static int microcode_fits(const struct microcode_amd *mc_amd, int cpu)
 {
     struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
     const struct microcode_header_amd *mc_header = mc_amd->mpb;
@@ -123,8 +151,17 @@ static bool_t microcode_fits(const struct microcode_amd *mc_amd, int cpu)
     if ( (mc_header->processor_rev_id) != equiv_cpu_id )
         return 0;
 
+    if ( !verify_patch_size(mc_amd->mpb_size) )
+    {
+        printk(XENLOG_DEBUG "microcode: patch size mismatch\n");
+        return -E2BIG;
+    }
+
     if ( mc_header->patch_id <= uci->cpu_sig.rev )
-        return 0;
+    {
+        printk(XENLOG_DEBUG "microcode: patch is already at required level or greater.\n");
+        return -EEXIST;
+    }
 
     printk(KERN_DEBUG "microcode: CPU%d found a matching microcode "
            "update with version %#x (current=%#x)\n",
@@ -319,7 +356,7 @@ static int cpu_request_microcode(int cpu, const void *buf, size_t bufsize)
     while ( (error = get_ucode_from_buffer_amd(mc_amd, buf, bufsize,
                                                &offset)) == 0 )
     {
-        if ( microcode_fits(mc_amd, cpu) )
+        if ( (error = microcode_fits(mc_amd, cpu)) > 0 )
         {
             error = apply_microcode(cpu);
             if ( error )
-- 
1.7.9.5

