[PATCH 05/21] x86/boot/slaunch_early: early TXT checks and boot data retrieval
From: Krystian Hebel <krystian.hebel@xxxxxxxxx>

The tests validate that important parts of memory, including Xen itself
and the MBI, are protected against DMA attacks. Modules can be tested
later, when it is possible to report issues to the user before invoking
a TXT reset.

TPM event log validation is temporarily disabled due to an issue with
how the bootloader (GRUB) allocates the log; GRUB will need to be
modified to address this. Ultimately the event log will also have to be
validated early, as it is used immediately after these tests to hold
MBI measurements. See the larger comment in txt_verify_pmr_ranges().

Signed-off-by: Krystian Hebel <krystian.hebel@xxxxxxxxx>
Signed-off-by: Sergii Dmytruk <sergii.dmytruk@xxxxxxxxx>
---
 xen/arch/x86/boot/slaunch_early.c    |   6 ++
 xen/arch/x86/include/asm/intel_txt.h | 111 +++++++++++++++++++++++++++
 2 files changed, 117 insertions(+)

diff --git a/xen/arch/x86/boot/slaunch_early.c b/xen/arch/x86/boot/slaunch_early.c
index 177267248f..af8aa29ae0 100644
--- a/xen/arch/x86/boot/slaunch_early.c
+++ b/xen/arch/x86/boot/slaunch_early.c
@@ -22,10 +22,13 @@ void slaunch_early_init(uint32_t load_base_addr,
     void *txt_heap;
     struct txt_os_mle_data *os_mle;
     struct slr_table *slrt;
+    struct txt_os_sinit_data *os_sinit;
     struct slr_entry_intel_info *intel_info;
+    uint32_t size = tgt_end_addr - tgt_base_addr;
 
     txt_heap = txt_init();
     os_mle = txt_os_mle_data_start(txt_heap);
+    os_sinit = txt_os_sinit_data_start(txt_heap);
 
     result->slrt_pa = os_mle->slrt;
     result->mbi_pa = 0;
@@ -38,4 +41,7 @@ void slaunch_early_init(uint32_t load_base_addr,
         return;
 
     result->mbi_pa = intel_info->boot_params_base;
+
+    txt_verify_pmr_ranges(os_mle, os_sinit, intel_info,
+                          load_base_addr, tgt_base_addr, size);
 }
diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h
index b973640c56..7170baf6fb 100644
--- a/xen/arch/x86/include/asm/intel_txt.h
+++ b/xen/arch/x86/include/asm/intel_txt.h
@@ -81,6 +81,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <xen/slr_table.h>
+
 /* Need to differentiate between pre- and post paging enabled. */
 #ifdef __EARLY_SLAUNCH__
 #include <xen/macros.h>
@@ -285,4 +287,113 @@ static inline void *txt_init(void)
     return txt_heap;
 }
 
+static inline int is_in_pmr(struct txt_os_sinit_data *os_sinit, uint64_t base,
+                            uint32_t size, int check_high)
+{
+    /* Check for size overflow. */
+    if ( base + size < base )
+        txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
+
+    /* Low range always starts at 0, so its size is also the end address. */
+    if ( base >= os_sinit->vtd_pmr_lo_base &&
+         base + size <= os_sinit->vtd_pmr_lo_size )
+        return 1;
+
+    if ( check_high && os_sinit->vtd_pmr_hi_size != 0 )
+    {
+        if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <
+             os_sinit->vtd_pmr_hi_size )
+            txt_reset(SLAUNCH_ERROR_INTEGER_OVERFLOW);
+        if ( base >= os_sinit->vtd_pmr_hi_base &&
+             base + size <= os_sinit->vtd_pmr_hi_base +
+                            os_sinit->vtd_pmr_hi_size )
+            return 1;
+    }
+
+    return 0;
+}
+
+static inline void txt_verify_pmr_ranges(struct txt_os_mle_data *os_mle,
+                                         struct txt_os_sinit_data *os_sinit,
+                                         struct slr_entry_intel_info *info,
+                                         uint32_t load_base_addr,
+                                         uint32_t tgt_base_addr,
+                                         uint32_t xen_size)
+{
+    int check_high_pmr = 0;
+
+    /* Verify the value of the low PMR base. It should always be 0. */
+    if ( os_sinit->vtd_pmr_lo_base != 0 )
+        txt_reset(SLAUNCH_ERROR_LO_PMR_BASE);
+
+    /*
+     * Low PMR size should not be 0 on current platforms. There is an ongoing
+     * transition to TPR-based DMA protection instead of PMR-based; this is
+     * not yet supported by the code.
+     */
+    if ( os_sinit->vtd_pmr_lo_size == 0 )
+        txt_reset(SLAUNCH_ERROR_LO_PMR_SIZE);
+
+    /* Check if regions overlap. Regions with no hole between are an error. */
+    if ( os_sinit->vtd_pmr_hi_size != 0 &&
+         os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size )
+        txt_reset(SLAUNCH_ERROR_HI_PMR_BASE);
+
+    /* All regions accessed by 32b code must be below 4G. */
+    if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <=
+         0x100000000ull )
+        check_high_pmr = 1;
+
+    /*
+     * ACM checks that TXT heap and MLE memory is protected against DMA. We
+     * have to check if the MBI and the whole of Xen memory are protected.
+     * The latter is done in case the bootloader failed to set the whole
+     * image as the MLE, and to make sure that both pre- and post-relocation
+     * code is protected.
+     */
+
+    /* Check if all of Xen before relocation is covered by PMR. */
+    if ( !is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr) )
+        txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
+
+    /* Check if all of Xen after relocation is covered by PMR. */
+    if ( load_base_addr != tgt_base_addr &&
+         !is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr) )
+        txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);
+
+    /*
+     * If present, check that the MBI is covered by PMR. The MBI starts with
+     * 'uint32_t total_size'.
+     */
+    if ( info->boot_params_base != 0 &&
+         !is_in_pmr(os_sinit, info->boot_params_base,
+                    *(uint32_t *)(uintptr_t)info->boot_params_base,
+                    check_high_pmr) )
+        txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
+
+    /* Check if the TPM event log (if present) is covered by PMR. */
+    /*
+     * FIXME: currently commented out as GRUB allocates it in a hole between
+     * PMR and reserved RAM, due to the 2MB resolution of PMR. There are no
+     * other easy-to-use DMA protection mechanisms that would allow
+     * protecting that part of memory. TPR (TXT DMA Protection Range) gives
+     * 1MB resolution, but it still wouldn't be enough.
+     *
+     * One possible solution would be for GRUB to allocate the log at a
+     * lower address, but this would further increase memory space
+     * fragmentation. Another option is to align PMR up instead of down,
+     * making PMR cover part of the reserved region, but it is unclear what
+     * the consequences may be.
+     *
+     * In tboot this issue was resolved by reserving leftover chunks of
+     * memory in the e820 and/or UEFI memory map. This is also a valid
+     * solution, but would require more changes to GRUB than the ones listed
+     * above, as the event log is allocated much earlier than the PMRs.
+     */
+    /*
+    if ( os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 &&
+         !is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
+                    check_high_pmr) )
+        txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
+    */
+}
+
 #endif /* __ASSEMBLY__ */
-- 
2.49.0
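To make the containment logic above easier to follow, here is a minimal,
self-contained user-space sketch of the check that is_in_pmr() performs.
It is illustrative only: struct pmr_layout, covered_by_pmr() and the PMR
values are hypothetical stand-ins for the vtd_pmr_* fields that the real
code reads from the TXT heap's OS-SINIT data, and an overflowing range is
simply rejected here rather than triggering a TXT reset.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical PMR layout, standing in for the vtd_pmr_* fields of
 * struct txt_os_sinit_data. The values in main() are made up.
 */
struct pmr_layout {
    uint64_t lo_base;   /* must be 0 on current platforms */
    uint64_t lo_size;   /* low PMR starts at 0, so size is also its end */
    uint64_t hi_base;
    uint64_t hi_size;   /* 0 means no high PMR is programmed */
};

/*
 * Return 1 if [base, base + size) is fully covered by the low PMR, or by
 * the high PMR when check_high is set; 0 otherwise (including overflow).
 */
static int covered_by_pmr(const struct pmr_layout *p, uint64_t base,
                          uint32_t size, int check_high)
{
    if ( base + size < base )
        return 0;

    if ( base >= p->lo_base && base + size <= p->lo_size )
        return 1;

    if ( check_high && p->hi_size != 0 &&
         base >= p->hi_base && base + size <= p->hi_base + p->hi_size )
        return 1;

    return 0;
}

int main(void)
{
    /* Low PMR covers the first 256 MiB, high PMR 3 GiB - 3.5 GiB (< 4G). */
    struct pmr_layout p = {
        .lo_base = 0,
        .lo_size = 256ULL << 20,
        .hi_base = 3ULL << 30,
        .hi_size = 512ULL << 20,
    };

    /* A 12 MiB image loaded at 16 MiB sits inside the low PMR. */
    printf("image covered:   %d\n",
           covered_by_pmr(&p, 16ULL << 20, 12U << 20, 0));

    /* A 1 MiB buffer at 300 MiB falls outside both ranges. */
    printf("buffer covered:  %d\n",
           covered_by_pmr(&p, 300ULL << 20, 1U << 20, 1));

    /* A 4 MiB buffer at 3 GiB + 16 MiB is covered by the high PMR. */
    printf("hi-buf covered:  %d\n",
           covered_by_pmr(&p, (3ULL << 30) + (16ULL << 20), 4U << 20, 1));

    return 0;
}

Built with a regular C compiler this prints 1, 0 and 1 for the three
probes. In the patch itself an uncovered range cannot be reported to the
user this early in boot, which is why txt_verify_pmr_ranges() calls
txt_reset() instead of returning an error.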