[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v1 3/6] x86: Enable Supervisor Mode Execution Prevention (SMAP) for Xen
> -----Original Message----- > From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx] > Sent: Tuesday, April 15, 2014 6:33 PM > To: Wu, Feng > Cc: JBeulich@xxxxxxxx; Ian.Campbell@xxxxxxxxxx; xen-devel@xxxxxxxxxxxxx; > Dong, Eddie; Nakajima, Jun > Subject: Re: [Xen-devel] [PATCH v1 3/6] x86: Enable Supervisor Mode Execution > Prevention (SMAP) for Xen > > On 15/04/14 14:01, Feng Wu wrote: > > Supervisor Mode Access Prevention (SMAP) is a new security > > feature disclosed by Intel, please refer to the following > > document: > > > > http://software.intel.com/sites/default/files/319433-014.pdf > > > > If CR4.SMAP = 1, supervisor-mode data accesses are not allowed > > to linear addresses that are accessible in user mode. If CPL < 3, > > SMAP protections are disabled if EFLAGS.AC = 1. If CPL = 3, SMAP > > applies to all supervisor-mode data accesses (these are implicit > > supervisor accesses) regardless of the value of EFLAGS.AC. > > > > This patch enables SMAP in Xen to prevent Xen hypervisor from > > accessing pv guest data, whose translation paging-structure > > entries' U/S flags are all set. > > > > Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx> > > --- > > xen/arch/x86/setup.c | 9 +++++++++ > > xen/arch/x86/traps.c | 34 > ++++++++++++++++++++++++++-------- > > xen/include/asm-x86/cpufeature.h | 1 + > > xen/include/asm-x86/domain.h | 6 ++++-- > > 4 files changed, 40 insertions(+), 10 deletions(-) > > > > diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c > > index e9c2c51..09c974d 100644 > > --- a/xen/arch/x86/setup.c > > +++ b/xen/arch/x86/setup.c > > @@ -61,6 +61,10 @@ integer_param("maxcpus", max_cpus); > > static bool_t __initdata disable_smep; > > invbool_param("smep", disable_smep); > > > > +/* smap: Enable/disable Supervisor Mode Access Prevention (default on). */ > > +static bool_t __initdata disable_smap; > > +invbool_param("smap", disable_smap); > > + > > Please use a positive boolean rather than negative. 
Convention is also > to prefix the variable with opt_ > > static bool_t __initdata opt_smap = 1; > boolean_param("smap", opt_smap); > Since SMAP is a similar feature compared to SMEP, here I just follow the existing code of SMEP. > > Also, please patch docs/misc/xen-command-line.markdown > > > /* **** Linux config option: propagated to domain0. */ > > /* "acpi=off": Disables both ACPI table parsing and interpreter. */ > > /* "acpi=force": Override the disable blacklist. */ > > @@ -1279,6 +1283,11 @@ void __init noreturn __start_xen(unsigned long > mbi_p) > > if ( cpu_has_smep ) > > set_in_cr4(X86_CR4_SMEP); > > > > + if ( disable_smap ) > > + setup_clear_cpu_cap(X86_FEATURE_SMAP); > > + if ( cpu_has_smap ) > > + set_in_cr4(X86_CR4_SMAP); > > + > > if ( cpu_has_fsgsbase ) > > set_in_cr4(X86_CR4_FSGSBASE); > > > > diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c > > index ed4ae2d..9307f10 100644 > > --- a/xen/arch/x86/traps.c > > +++ b/xen/arch/x86/traps.c > > @@ -1182,11 +1182,12 @@ static int handle_gdt_ldt_mapping_fault( > > enum pf_type { > > real_fault, > > smep_fault, > > + smap_fault, > > spurious_fault > > }; > > > > static enum pf_type __page_fault_type( > > - unsigned long addr, unsigned int error_code) > > + unsigned long addr, struct cpu_user_regs *regs) > > { > > unsigned long mfn, cr3 = read_cr3(); > > l4_pgentry_t l4e, *l4t; > > @@ -1194,6 +1195,7 @@ static enum pf_type __page_fault_type( > > l2_pgentry_t l2e, *l2t; > > l1_pgentry_t l1e, *l1t; > > unsigned int required_flags, disallowed_flags, page_user; > > + unsigned int error_code = regs->error_code; > > > > /* > > * We do not take spurious page faults in IRQ handlers as we do not > > @@ -1270,11 +1272,26 @@ leaf: > > ((error_code & (PFEC_insn_fetch|PFEC_user_mode)) == > PFEC_insn_fetch) ) > > return smep_fault; > > > > + /* > > + * Supervisor Mode Access Prevention (SMAP): > > + * Disallow supervisor access user-accessible mappings > > + * A fault is considered as an SMAP violation if the
following > > + * conditions are ture: > > 'true' > > > + * - X86_CR4_SMAP is set in CR4 > > + * - An user page is accessed > > + * - CPL=3 or X86_EFLAGS_AC is clear > > CPL < 3 surely? > > ~Andrew The comments here are correct. SMAP takes effect when: - CPL == 3 or - CPL <3 && AC bit is clear > > > + * - Page fault in kernel mode > > + */ > > + if ( (read_cr4() & X86_CR4_SMAP) && page_user && > > + !(((regs->cs & 0x03) < 3) && (regs->eflags & X86_EFLAGS_AC)) > && > > + !(error_code & PFEC_user_mode) ) > > + return smap_fault; > > + > > return spurious_fault; > > } > > > > static enum pf_type spurious_page_fault( > > - unsigned long addr, unsigned int error_code) > > + unsigned long addr, struct cpu_user_regs *regs) > > { > > unsigned long flags; > > enum pf_type pf_type; > > @@ -1284,7 +1301,7 @@ static enum pf_type spurious_page_fault( > > * page tables from becoming invalid under our feet during the walk. > > */ > > local_irq_save(flags); > > - pf_type = __page_fault_type(addr, error_code); > > + pf_type = __page_fault_type(addr, regs); > > local_irq_restore(flags); > > > > return pf_type; > > @@ -1379,8 +1396,8 @@ void do_page_fault(struct cpu_user_regs *regs) > > > > if ( unlikely(!guest_mode(regs)) ) > > { > > - pf_type = spurious_page_fault(addr, error_code); > > - BUG_ON(pf_type == smep_fault); > > + pf_type = spurious_page_fault(addr, regs); > > + BUG_ON((pf_type == smep_fault) || (pf_type == smap_fault)); > > if ( pf_type != real_fault ) > > return; > > > > @@ -1406,10 +1423,11 @@ void do_page_fault(struct cpu_user_regs *regs) > > > > if ( unlikely(current->domain->arch.suppress_spurious_page_faults) ) > > { > > - pf_type = spurious_page_fault(addr, error_code); > > - if ( pf_type == smep_fault ) > > + pf_type = spurious_page_fault(addr, regs); > > + if ( (pf_type == smep_fault) || (pf_type == smap_fault)) > > { > > - gdprintk(XENLOG_ERR, "Fatal SMEP fault\n"); > > + gdprintk(XENLOG_ERR, "Fatal %s fault\n", > > + (pf_type == smep_fault) ?
"SMEP" : "SMAP"); > > domain_crash(current->domain); > > } > > if ( pf_type != real_fault ) > > diff --git a/xen/include/asm-x86/cpufeature.h > b/xen/include/asm-x86/cpufeature.h > > index 0c4d6c1..3ded6bd 100644 > > --- a/xen/include/asm-x86/cpufeature.h > > +++ b/xen/include/asm-x86/cpufeature.h > > @@ -186,6 +186,7 @@ > > #define cpu_has_fsgsbase boot_cpu_has(X86_FEATURE_FSGSBASE) > > > > #define cpu_has_smep boot_cpu_has(X86_FEATURE_SMEP) > > +#define cpu_has_smap boot_cpu_has(X86_FEATURE_SMAP) > > #define cpu_has_fpu_sel > (!boot_cpu_has(X86_FEATURE_NO_FPU_SEL)) > > > > #define cpu_has_ffxsr ((boot_cpu_data.x86_vendor == > X86_VENDOR_AMD) \ > > diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h > > index 4ff89f0..3b515f2 100644 > > --- a/xen/include/asm-x86/domain.h > > +++ b/xen/include/asm-x86/domain.h > > @@ -464,12 +464,14 @@ unsigned long pv_guest_cr4_fixup(const struct > vcpu *, unsigned long guest_cr4); > > (((v)->arch.pv_vcpu.ctrlreg[4] \ > > | (mmu_cr4_features \ > > & (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_SMEP | \ > > - X86_CR4_OSXSAVE | X86_CR4_FSGSBASE)) \ > > + X86_CR4_SMAP | X86_CR4_OSXSAVE | \ > > + X86_CR4_FSGSBASE)) \ > > | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)) \ > > & ~X86_CR4_DE) > > #define real_cr4_to_pv_guest_cr4(c) \ > > ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD | \ > > - X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE)) > > + X86_CR4_OSXSAVE | X86_CR4_SMEP | \ > > + X86_CR4_FSGSBASE | X86_CR4_SMAP)) > > > > void domain_cpuid(struct domain *d, > > unsigned int input, Thanks, Feng _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.