[Xen-devel] [PATCH v4] xen/sm{e,a}p: allow disabling sm{e,a}p for Xen itself
SMEP/SMAP is a security feature that prevents the kernel from involuntarily executing or accessing user addresses; any such access leads to a page fault. In the existing code, SMEP/SMAP is enabled (in CR4) for both Xen and HVM guests. With the SMEP/SMAP bits set in Xen's CR4, the checks are also enforced against 32-bit PV guests, which then suffer unexpected SMEP/SMAP page faults when the guest kernel accesses user addresses, even though SMEP/SMAP is disabled for PV guests.

This patch introduces a new value "hvm" for the "sm{e,a}p" boot options. It disables SMEP/SMAP for the Xen hypervisor itself while keeping them enabled for HVM guests, so 32-bit PV guests no longer hit this issue. Users can thus choose whether to enable SMEP/SMAP for Xen itself, in particular when they intend to run 32-bit PV guests.
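For example, to keep SMEP/SMAP enabled for HVM guests only, both options would be passed on the Xen command line via the bootloader (illustrative GRUB2 snippet only; the file and variable name depend on the distribution's GRUB hooks and are not part of this patch):

    # /etc/default/grub (hypothetical host configuration)
    GRUB_CMDLINE_XEN_DEFAULT="smep=hvm smap=hvm"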
Signed-off-by: He Chen <he.chen@xxxxxxxxxxxxxxx>
---
Changes in v4:
* introduce 2 new synthetic features X86_FEATURE_XEN_SMEP and
  X86_FEATURE_XEN_SMAP for Xen itself.
* adjust SM{E,A}P related instruction patching code.
* Commit message refinement.

Changes in v3:
* Fix boot options.
* Fix CR4 & mmu_cr4_features operations.
* Disable SMEP/SMAP for Dom0.
* Commit message refinement.

Changes in v2:
* Allow "hvm" as a value to "smep" and "smap" command line options.
* Clear SMEP/SMAP CPUID bits for pv guests if they are set to hvm only.
* Refine docs.
* Rewrite commit message.
---
 docs/misc/xen-command-line.markdown |  2 ++
 xen/arch/x86/setup.c                | 58 ++++++++++++++++++++++++++++++++-----
 xen/include/asm-x86/asm_defns.h     | 10 +++----
 xen/include/asm-x86/cpufeature.h    |  5 +++-
 4 files changed, 61 insertions(+), 14 deletions(-)

diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 3a250cb..b15f3e7 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1433,6 +1433,7 @@ Set the serial transmit buffer size.
 > Default: `true`
 
 Flag to enable Supervisor Mode Execution Protection
+Use `smep=hvm` to enable SMEP for HVM guests only.
 
 ### smap
 > `= <boolean>`
@@ -1440,6 +1441,7 @@ Flag to enable Supervisor Mode Execution Protection
 > Default: `true`
 
 Flag to enable Supervisor Mode Access Prevention
+Use `smap=hvm` to enable SMAP for HVM guests only.
 
 ### snb\_igd\_quirk
 > `= <boolean> | cap | <integer>`
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 217c775..59238a4 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -62,12 +62,12 @@ static unsigned int __initdata max_cpus;
 integer_param("maxcpus", max_cpus);
 
 /* smep: Enable/disable Supervisor Mode Execution Protection (default on). */
-static bool_t __initdata opt_smep = 1;
-boolean_param("smep", opt_smep);
+static void parse_smep_param(char *s);
+custom_param("smep", parse_smep_param);
 
 /* smap: Enable/disable Supervisor Mode Access Prevention (default on). */
-static bool_t __initdata opt_smap = 1;
-boolean_param("smap", opt_smap);
+static void parse_smap_param(char *s);
+custom_param("smap", parse_smap_param);
 
 unsigned long __read_mostly cr4_pv32_mask;
 
@@ -111,6 +111,48 @@ struct cpuinfo_x86 __read_mostly boot_cpu_data = { 0, 0, 0, 0, -1 };
 
 unsigned long __read_mostly mmu_cr4_features = XEN_MINIMAL_CR4;
 
+#define SMEP_HVM_ONLY (-1)
+static int __initdata opt_smep = 1;
+static void __init parse_smep_param(char *s)
+{
+    if ( !strcmp(s, "hvm") )
+    {
+        opt_smep = SMEP_HVM_ONLY;
+    }
+    else if ( !parse_bool(s) )
+    {
+        opt_smep = 0;
+    }
+    else if ( parse_bool(s) && opt_smep != SMEP_HVM_ONLY )
+    {
+        opt_smep = 1;
+    }
+
+    if ( opt_smep == 1 )
+        __set_bit(X86_FEATURE_XEN_SMEP, boot_cpu_data.x86_capability);
+}
+
+#define SMAP_HVM_ONLY (-1)
+static int __initdata opt_smap = 1;
+static void __init parse_smap_param(char *s)
+{
+    if ( !strcmp(s, "hvm") )
+    {
+        opt_smap = SMAP_HVM_ONLY;
+    }
+    else if ( !parse_bool(s) )
+    {
+        opt_smap = 0;
+    }
+    else if ( parse_bool(s) && opt_smap != SMAP_HVM_ONLY )
+    {
+        opt_smap = 1;
+    }
+
+    if ( opt_smap == 1 )
+        __set_bit(X86_FEATURE_XEN_SMAP, boot_cpu_data.x86_capability);
+}
+
 bool_t __read_mostly acpi_disabled;
 bool_t __initdata acpi_force;
 static char __initdata acpi_param[10] = "";
@@ -1403,12 +1445,12 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 
     if ( !opt_smep )
         setup_clear_cpu_cap(X86_FEATURE_SMEP);
-    if ( cpu_has_smep )
+    if ( cpu_has_xen_smep )
         set_in_cr4(X86_CR4_SMEP);
 
     if ( !opt_smap )
         setup_clear_cpu_cap(X86_FEATURE_SMAP);
-    if ( cpu_has_smap )
+    if ( cpu_has_xen_smap )
         set_in_cr4(X86_CR4_SMAP);
 
     cr4_pv32_mask = mmu_cr4_features & XEN_CR4_PV32_BITS;
@@ -1550,7 +1592,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
      * This saves a large number of corner cases interactions with
      * copy_from_user().
      */
-    if ( cpu_has_smap )
+    if ( cpu_has_xen_smap )
     {
         cr4_pv32_mask &= ~X86_CR4_SMAP;
         write_cr4(read_cr4() & ~X86_CR4_SMAP);
@@ -1570,7 +1612,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
                         bootstrap_map, cmdline) != 0)
         panic("Could not set up DOM0 guest OS");
 
-    if ( cpu_has_smap )
+    if ( cpu_has_xen_smap )
     {
         write_cr4(read_cr4() | X86_CR4_SMAP);
         cr4_pv32_mask |= X86_CR4_SMAP;
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index e36e78f..f1c6fa1 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -205,7 +205,7 @@ void ret_from_intr(void);
         .popsection;                                                \
         .pushsection .altinstructions, "a";                         \
         altinstruction_entry 661b, 661b, X86_FEATURE_ALWAYS, 3, 0;  \
-        altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3;    \
+        altinstruction_entry 661b, 662b, X86_FEATURE_XEN_SMAP, 3, 3; \
         .popsection
 
 #define ASM_STAC ASM_AC(STAC)
@@ -217,21 +217,21 @@ void ret_from_intr(void);
 668:     call cr4_pv32_restore;                                     \
         .section .altinstructions, "a";                             \
         altinstruction_entry 667b, 667b, X86_FEATURE_ALWAYS, 5, 0;  \
-        altinstruction_entry 667b, 668b, X86_FEATURE_SMEP, 5, 5;    \
-        altinstruction_entry 667b, 668b, X86_FEATURE_SMAP, 5, 5;    \
+        altinstruction_entry 667b, 668b, X86_FEATURE_XEN_SMEP, 5, 5; \
+        altinstruction_entry 667b, 668b, X86_FEATURE_XEN_SMAP, 5, 5; \
         .popsection
 
 #else
 
 static always_inline void clac(void)
 {
     /* Note: a barrier is implicit in alternative() */
-    alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+    alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_XEN_SMAP);
 }
 
 static always_inline void stac(void)
 {
     /* Note: a barrier is implicit in alternative() */
-    alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+    alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_XEN_SMAP);
 }
 #endif
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index bcdf5d6..a0b5b06 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -17,6 +17,8 @@ XEN_CPUFEATURE(CPUID_FAULTING, (FSCAPINTS+0)*32+ 6) /* cpuid faulting */
 XEN_CPUFEATURE(CLFLUSH_MONITOR, (FSCAPINTS+0)*32+ 7) /* clflush reqd with monitor */
 XEN_CPUFEATURE(APERFMPERF, (FSCAPINTS+0)*32+ 8) /* APERFMPERF */
 XEN_CPUFEATURE(MFENCE_RDTSC, (FSCAPINTS+0)*32+ 9) /* MFENCE synchronizes RDTSC */
+XEN_CPUFEATURE(XEN_SMEP, (FSCAPINTS+0)*32+ 10) /* SMEP gets used by Xen itself */
+XEN_CPUFEATURE(XEN_SMAP, (FSCAPINTS+0)*32+ 11) /* SMAP gets used by Xen itself */
 
 #define NCAPINTS (FSCAPINTS + 1) /* N 32-bit words worth of info */
 
@@ -67,7 +69,8 @@ XEN_CPUFEATURE(MFENCE_RDTSC, (FSCAPINTS+0)*32+ 9) /* MFENCE synchronizes RDTSC */
 #define cpu_has_fsgsbase boot_cpu_has(X86_FEATURE_FSGSBASE)
 #define cpu_has_aperfmperf boot_cpu_has(X86_FEATURE_APERFMPERF)
 #define cpu_has_smep boot_cpu_has(X86_FEATURE_SMEP)
-#define cpu_has_smap boot_cpu_has(X86_FEATURE_SMAP)
+#define cpu_has_xen_smep boot_cpu_has(X86_FEATURE_XEN_SMEP)
+#define cpu_has_xen_smap boot_cpu_has(X86_FEATURE_XEN_SMAP)
 #define cpu_has_fpu_sel (!boot_cpu_has(X86_FEATURE_NO_FPU_SEL))
 #define cpu_has_ffxsr ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) \
                        && boot_cpu_has(X86_FEATURE_FFXSR))
-- 
1.9.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel