[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 18/21] x86/boot: find MBI and SLRT on AMD



Use slr_entry_amd_info::boot_params_base on AMD with SKINIT to get MBI
location.

Another thing of interest is the location of the SLRT, which is stored as
bootloader's data placed right after SKL.

Signed-off-by: Krystian Hebel <krystian.hebel@xxxxxxxxx>
Signed-off-by: Sergii Dmytruk <sergii.dmytruk@xxxxxxxxx>
---
 xen/arch/x86/boot/head.S          | 38 ++++++++++++++++----
 xen/arch/x86/boot/slaunch_early.c | 58 +++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
index 419bf58d5c..3184b6883a 100644
--- a/xen/arch/x86/boot/head.S
+++ b/xen/arch/x86/boot/head.S
@@ -354,10 +354,12 @@ cs32_switch:
         jmp     *%edi
 
         /*
-         * Entry point for TrenchBoot Secure Launch on Intel TXT platforms.
+         * Entry point for TrenchBoot Secure Launch, common for Intel TXT and
+         * AMD Secure Startup, but state is slightly different.
          *
+         * On Intel:
          * CPU is in 32b protected mode with paging disabled. On entry:
-         * - %ebx = %eip = MLE entry point,
+         * - %ebx = %eip = this entry point,
          * - stack pointer is undefined,
          * - CS is flat 4GB code segment,
          * - DS, ES, SS, FS and GS are undefined according to TXT SDG, but this
@@ -375,13 +377,34 @@ cs32_switch:
          * - trying to enter real mode results in reset
          * - APs must be brought up by MONITOR or GETSEC[WAKEUP], depending on
          *   which is supported by a given SINIT ACM
+         *
+         * On AMD (as implemented by TrenchBoot's SKL):
+         * CPU is in 32b protected mode with paging disabled. On entry:
+         * - %ebx = %eip = this entry point,
+         * - %ebp holds base address of SKL
+         * - stack pointer is treated as undefined for parity with TXT,
+         * - CS is flat 4GB code segment,
+         * - DS, ES, SS are flat 4GB data segments, but treated as undefined for
+         *   parity with TXT.
+         *
+         * Additional restrictions:
+         * - interrupts (including NMIs and SMIs) are disabled and must be
+         *   enabled later
+         * - APs must be brought up by SIPI without an INIT
          */
 slaunch_stub_entry:
         /* Calculate the load base address. */
         mov     %ebx, %esi
         sub     $sym_offs(slaunch_stub_entry), %esi
 
-        /* Mark Secure Launch boot protocol and jump to common entry. */
+        /* On AMD, %ebp holds the base address of SLB, save it for later. */
+        mov     %ebp, %ebx
+
+        /*
+         * Mark Secure Launch boot protocol and jump to common entry. Note that
+         * all general purpose registers except %ebx and %esi are clobbered
+         * between here and .Lslaunch_proto.
+         */
         mov     $SLAUNCH_BOOTLOADER_MAGIC, %eax
         jmp     .Lset_stack
 
@@ -508,15 +531,18 @@ __start:
         sub     $8, %esp
 
         push    %esp                             /* pointer to output structure */
+        push    %ebx                             /* Slaunch parameter on AMD */
         lea     sym_offs(__2M_rwdata_end), %ecx  /* end of target image */
         lea     sym_offs(_start), %edx           /* target base address */
         mov     %esi, %eax                       /* load base address */
         /*
-         * slaunch_early_init(load/eax, tgt/edx, tgt_end/ecx, ret/stk) using
-         * fastcall calling convention.
+         * slaunch_early_init(load/eax, tgt/edx, tgt_end/ecx,
+         *                    slaunch/stk, ret/stk)
+         *
+         * Uses fastcall calling convention.
          */
         call    slaunch_early_init
-        add     $4, %esp                         /* pop the fourth parameter */
+        add     $8, %esp                         /* pop last two parameters */
 
         /* Move outputs of slaunch_early_init() from stack into registers. */
         pop     %eax  /* physical MBI address */
diff --git a/xen/arch/x86/boot/slaunch_early.c b/xen/arch/x86/boot/slaunch_early.c
index af8aa29ae0..d53faf8ab0 100644
--- a/xen/arch/x86/boot/slaunch_early.c
+++ b/xen/arch/x86/boot/slaunch_early.c
@@ -7,6 +7,20 @@
 #include <xen/slr_table.h>
 #include <xen/types.h>
 #include <asm/intel_txt.h>
+#include <asm/x86-vendors.h>
+
+/*
+ * The AMD-defined structure layout for the SLB. The last two fields are
+ * SL-specific.
+ */
+struct skinit_sl_header
+{
+    uint16_t skl_entry_point;
+    uint16_t length;
+    uint8_t reserved[62];
+    uint16_t skl_info_offset;
+    uint16_t bootloader_data_offset;
+} __packed;
 
 struct early_init_results
 {
@@ -14,9 +28,25 @@ struct early_init_results
     uint32_t slrt_pa;
 } __packed;
 
+static bool is_intel_cpu(void)
+{
+    /*
+     * asm/processor.h can't be included in early code, which means neither
+     * cpuid() function nor boot_cpu_data can be used here.
+     */
+    uint32_t eax, ebx, ecx, edx;
+    asm volatile ( "cpuid"
+          : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+          : "0" (0), "c" (0) );
+    return ebx == X86_VENDOR_INTEL_EBX
+        && ecx == X86_VENDOR_INTEL_ECX
+        && edx == X86_VENDOR_INTEL_EDX;
+}
+
 void slaunch_early_init(uint32_t load_base_addr,
                         uint32_t tgt_base_addr,
                         uint32_t tgt_end_addr,
+                        uint32_t slaunch_param,
                         struct early_init_results *result)
 {
     void *txt_heap;
@@ -26,6 +56,34 @@ void slaunch_early_init(uint32_t load_base_addr,
     struct slr_entry_intel_info *intel_info;
     uint32_t size = tgt_end_addr - tgt_base_addr;
 
+    if ( !is_intel_cpu() )
+    {
+        /*
+         * Not an Intel CPU. Currently the only other option is AMD with SKINIT
+         * and secure-kernel-loader (SKL).
+         */
+        struct slr_entry_amd_info *amd_info;
+        const struct skinit_sl_header *sl_header = (void *)slaunch_param;
+
+        /*
+         * slaunch_param holds a physical address of SLB.
+         * Bootloader's data is SLRT.
+         */
+        result->slrt_pa = slaunch_param + sl_header->bootloader_data_offset;
+        result->mbi_pa = 0;
+
+        slrt = (struct slr_table *)(uintptr_t)result->slrt_pa;
+
+        amd_info = (struct slr_entry_amd_info *)
+            slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_AMD_INFO);
+        /* Basic checks only, SKL checked and consumed the rest. */
+        if ( amd_info == NULL || amd_info->hdr.size != sizeof(*amd_info) )
+            return;
+
+        result->mbi_pa = amd_info->boot_params_base;
+        return;
+    }
+
     txt_heap = txt_init();
     os_mle = txt_os_mle_data_start(txt_heap);
     os_sinit = txt_os_sinit_data_start(txt_heap);
-- 
2.49.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.