
Re: [Xen-devel] [PATCH v1 2/6] x86: Temporarily disable SMAP to legally access user pages in kernel mode



On 15/04/14 14:01, Feng Wu wrote:
> Use STAC/CLAC to temporarily disable SMAP, allowing legitimate accesses
> to user pages in kernel mode.
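
For readers without patch 1/6 to hand: STAC/CLAC simply toggle EFLAGS.AC,
which is the bit SMAP keys off in supervisor mode, so setting it opens a
window in which hypervisor code may touch user-accessible pages.  A minimal
sketch of helpers like the stac()/clac() used below -- the
X86_FEATURE_SMAP guard and the exact shape are my assumption of what that
patch provides, not a quote of it:

    /* Illustrative only: EFLAGS.AC gates SMAP enforcement in supervisor
     * mode, so these must be no-ops on hardware without SMAP. */
    static inline void stac(void)
    {
        if ( boot_cpu_has(X86_FEATURE_SMAP) )
            asm volatile ( "stac" : : : "memory" );
    }

    static inline void clac(void)
    {
        if ( boot_cpu_has(X86_FEATURE_SMAP) )
            asm volatile ( "clac" : : : "memory" );
    }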
>
> Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
> ---
>  xen/arch/x86/clear_page.S           |  3 +++
>  xen/arch/x86/domain_build.c         | 16 ++++++++++++++++
>  xen/arch/x86/usercopy.c             |  6 ++++++
>  xen/arch/x86/x86_64/compat/entry.S  |  2 ++
>  xen/arch/x86/x86_64/entry.S         |  4 ++++
>  xen/include/asm-x86/uaccess.h       |  4 ++++
>  xen/include/asm-x86/x86_64/system.h |  2 ++
>  7 files changed, 37 insertions(+)
>
> diff --git a/xen/arch/x86/clear_page.S b/xen/arch/x86/clear_page.S
> index 96315ad..2ef4bb0 100644
> --- a/xen/arch/x86/clear_page.S
> +++ b/xen/arch/x86/clear_page.S
> @@ -1,9 +1,11 @@
>  #include <xen/config.h>
>  #include <asm/page.h>
> +#include <asm/asm_defns.h>
>  
>  #define ptr_reg %rdi
>  
>  ENTRY(clear_page_sse2)
> +        ASM_STAC
>          mov     $PAGE_SIZE/16, %ecx
>          xor     %eax,%eax
>  
> @@ -15,5 +17,6 @@ ENTRY(clear_page_sse2)
>          lea     16(ptr_reg), ptr_reg
>          jnz     0b
>  
> +        ASM_CLAC
>          sfence
>          ret
> diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
> index 84ce392..1ba138b 100644
> --- a/xen/arch/x86/domain_build.c
> +++ b/xen/arch/x86/domain_build.c
> @@ -789,8 +789,10 @@ int __init construct_dom0(
>              rc = -1;
>              goto out;
>          }
> +        stac();
>          hypercall_page_initialise(
>              d, (void *)(unsigned long)parms.virt_hypercall);
> +        clac();
>      }
>  
>      /* Free temporary buffers. */
> @@ -799,6 +801,7 @@ int __init construct_dom0(
>      /* Set up start info area. */
>      si = (start_info_t *)vstartinfo_start;
>      clear_page(si);
> +    stac();
>      si->nr_pages = nr_pages;
>  
>      si->shared_info = virt_to_maddr(d->shared_info);
> @@ -813,6 +816,7 @@ int __init construct_dom0(
>      snprintf(si->magic, sizeof(si->magic), "xen-3.0-x86_%d%s",
>               elf_64bit(&elf) ? 64 : 32, parms.pae ? "p" : "");
>  
> +    clac();
>      count = d->tot_pages;
>      l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
>      l3tab = NULL;
> @@ -956,16 +960,20 @@ int __init construct_dom0(
>          if ( pfn > REVERSE_START && (vinitrd_start || pfn < initrd_pfn) )
>              mfn = alloc_epfn - (pfn - REVERSE_START);
>  #endif
> +        stac();
>          if ( !is_pv_32on64_domain(d) )
>              ((unsigned long *)vphysmap_start)[pfn] = mfn;
>          else
>              ((unsigned int *)vphysmap_start)[pfn] = mfn;
> +        clac();
>          set_gpfn_from_mfn(mfn, pfn);
>          if (!(pfn & 0xfffff))
>              process_pending_softirqs();
>      }
> +    stac();
>      si->first_p2m_pfn = pfn;
>      si->nr_p2m_frames = d->tot_pages - count;
> +    clac();
>      page_list_for_each ( page, &d->page_list )
>      {
>          mfn = page_to_mfn(page);
> @@ -976,7 +984,9 @@ int __init construct_dom0(
>              if ( !page->u.inuse.type_info &&
>                   !get_page_and_type(page, d, PGT_writable_page) )
>                  BUG();
> +            stac();
>              ((unsigned long *)vphysmap_start)[pfn] = mfn;
> +            clac();
>              set_gpfn_from_mfn(mfn, pfn);
>              ++pfn;
>              if (!(pfn & 0xfffff))
> @@ -985,7 +995,9 @@ int __init construct_dom0(
>      }
>      BUG_ON(pfn != d->tot_pages);
>  #ifndef NDEBUG
> +    stac();
>      alloc_epfn += PFN_UP(initrd_len) + si->nr_p2m_frames;
> +    clac();

This stac()/clac() pair could presumably be eliminated by keeping a stack
copy of si->nr_p2m_frames?
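
Something like this, say -- an untested fragment, with "nr_p2m_frames" as a
hypothetical new local in construct_dom0(), captured inside the existing
stac()/clac() window a few hunks up:

    unsigned long nr_p2m_frames;  /* hypothetical stack copy */

    stac();
    si->first_p2m_pfn = pfn;
    nr_p2m_frames = d->tot_pages - count;
    si->nr_p2m_frames = nr_p2m_frames;
    clac();
    ...
#ifndef NDEBUG
    /* Only the stack copy is read here, so no stac()/clac() required. */
    alloc_epfn += PFN_UP(initrd_len) + nr_p2m_frames;
#endif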

~Andrew

>  #endif
>      while ( pfn < nr_pages )
>      {
> @@ -997,10 +1009,12 @@ int __init construct_dom0(
>  #ifndef NDEBUG
>  #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
>  #endif
> +            stac();
>              if ( !is_pv_32on64_domain(d) )
>                  ((unsigned long *)vphysmap_start)[pfn] = mfn;
>              else
>                  ((unsigned int *)vphysmap_start)[pfn] = mfn;
> +            clac();
>              set_gpfn_from_mfn(mfn, pfn);
>  #undef pfn
>              page++; pfn++;
> @@ -1009,6 +1023,7 @@ int __init construct_dom0(
>          }
>      }
>  
> +    stac();
>      if ( initrd_len != 0 )
>      {
>          si->mod_start = vinitrd_start ?: initrd_pfn;
> @@ -1024,6 +1039,7 @@ int __init construct_dom0(
>          si->console.dom0.info_off  = sizeof(struct start_info);
>          si->console.dom0.info_size = sizeof(struct dom0_vga_console_info);
>      }
> +    clac();
>  
>      if ( is_pv_32on64_domain(d) )
>          xlat_start_info(si, XLAT_start_info_console_dom0);
> diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
> index b79202b..5dd77db 100644
> --- a/xen/arch/x86/usercopy.c
> +++ b/xen/arch/x86/usercopy.c
> @@ -15,6 +15,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
>      unsigned long __d0, __d1, __d2, __n = n;
>  
>      asm volatile (
> +             ASM_STAC"\n"
>          "    cmp  $"STR(2*BYTES_PER_LONG-1)",%0\n"
>          "    jbe  1f\n"
>          "    mov  %1,%0\n"
> @@ -30,6 +31,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
>          "    mov  %3,%0\n"
>          "1:  rep movsb\n" /* ...remainder copied as bytes */
>          "2:\n"
> +             ASM_CLAC"\n"
>          ".section .fixup,\"ax\"\n"
>          "5:  add %3,%0\n"
>          "    jmp 2b\n"
> @@ -52,6 +54,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned n)
>      unsigned long __d0, __d1, __d2, __n = n;
>  
>      asm volatile (
> +             ASM_STAC"\n"
>          "    cmp  $"STR(2*BYTES_PER_LONG-1)",%0\n"
>          "    jbe  1f\n"
>          "    mov  %1,%0\n"
> @@ -67,6 +70,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned n)
>          "    mov  %3,%0\n"
>          "1:  rep; movsb\n" /* ...remainder copied as bytes */
>          "2:\n"
> +             ASM_CLAC"\n"
>          ".section .fixup,\"ax\"\n"
>          "5:  add %3,%0\n"
>          "    jmp 6f\n"
> @@ -114,10 +118,12 @@ copy_to_user(void __user *to, const void *from, unsigned n)
>  do {                                                                 \
>       long __d0;                                                      \
>       __asm__ __volatile__(                                           \
> +                     ASM_STAC"\n"                                    \
>               "0:     rep; stosl\n"                                   \
>               "       movl %2,%0\n"                                   \
>               "1:     rep; stosb\n"                                   \
>               "2:\n"                                                  \
> +                     ASM_CLAC"\n"                                    \
>               ".section .fixup,\"ax\"\n"                              \
>               "3:     lea 0(%2,%0,4),%0\n"                            \
>               "       jmp 2b\n"                                       \
> diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
> index 32b3bcc..b0fabf4 100644
> --- a/xen/arch/x86/x86_64/compat/entry.S
> +++ b/xen/arch/x86/x86_64/compat/entry.S
> @@ -265,6 +265,7 @@ ENTRY(compat_int80_direct_trap)
>  /* On return only %rbx and %rdx are guaranteed non-clobbered.            */
>  compat_create_bounce_frame:
>          ASSERT_INTERRUPTS_ENABLED
> +        ASM_STAC
>          mov   %fs,%edi
>          testb $2,UREGS_cs+8(%rsp)
>          jz    1f
> @@ -336,6 +337,7 @@ __UNLIKELY_END(compat_bounce_null_selector)
>          movl  %eax,UREGS_cs+8(%rsp)
>          movl  TRAPBOUNCE_eip(%rdx),%eax
>          movl  %eax,UREGS_rip+8(%rsp)
> +        ASM_CLAC
>          ret
>  .section .fixup,"ax"
>  .Lfx13:
> diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
> index 3ea4683..63ba795 100644
> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -377,6 +377,7 @@ __UNLIKELY_END(create_bounce_frame_bad_sp)
>          movb  TRAPBOUNCE_flags(%rdx),%cl
>          subq  $40,%rsi
>          movq  UREGS_ss+8(%rsp),%rax
> +        ASM_STAC
>  .Lft2:  movq  %rax,32(%rsi)             # SS
>          movq  UREGS_rsp+8(%rsp),%rax
>  .Lft3:  movq  %rax,24(%rsi)             # RSP
> @@ -434,9 +435,11 @@ UNLIKELY_END(bounce_failsafe)
>          testq %rax,%rax
>  UNLIKELY_START(z, create_bounce_frame_bad_bounce_ip)
>          lea   UNLIKELY_DISPATCH_LABEL(create_bounce_frame_bad_bounce_ip)(%rip), %rdi
> +        ASM_CLAC
>          jmp   asm_domain_crash_synchronous  /* Does not return */
>  __UNLIKELY_END(create_bounce_frame_bad_bounce_ip)
>          movq  %rax,UREGS_rip+8(%rsp)
> +        ASM_CLAC
>          ret
>          _ASM_EXTABLE(.Lft2,  dom_crash_sync_extable)
>          _ASM_EXTABLE(.Lft3,  dom_crash_sync_extable)
> @@ -463,6 +466,7 @@ ENTRY(dom_crash_sync_extable)
>          leal  (%rax,%rax,2),%eax
>          orb   %al,UREGS_cs(%rsp)
>          xorl  %edi,%edi
> +        ASM_CLAC
>          jmp   asm_domain_crash_synchronous /* Does not return */
>  
>  /* No special register assumptions. */
> diff --git a/xen/include/asm-x86/uaccess.h b/xen/include/asm-x86/uaccess.h
> index 88b4ba2..ce1af4a 100644
> --- a/xen/include/asm-x86/uaccess.h
> +++ b/xen/include/asm-x86/uaccess.h
> @@ -147,8 +147,10 @@ struct __large_struct { unsigned long buf[100]; };
>   */
>  #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)    \
>       __asm__ __volatile__(                                           \
> +             ASM_STAC"\n"                                            \
>               "1:     mov"itype" %"rtype"1,%2\n"                      \
>               "2:\n"                                                  \
> +             ASM_CLAC"\n"                                            \
>               ".section .fixup,\"ax\"\n"                              \
>               "3:     mov %3,%0\n"                                    \
>               "       jmp 2b\n"                                       \
> @@ -159,8 +161,10 @@ struct __large_struct { unsigned long buf[100]; };
>  
>  #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)    \
>       __asm__ __volatile__(                                           \
> +             ASM_STAC"\n"                                            \
>               "1:     mov"itype" %2,%"rtype"1\n"                      \
>               "2:\n"                                                  \
> +             ASM_CLAC"\n"                                            \
>               ".section .fixup,\"ax\"\n"                              \
>               "3:     mov %3,%0\n"                                    \
>               "       xor"itype" %"rtype"1,%"rtype"1\n"               \
> diff --git a/xen/include/asm-x86/x86_64/system.h b/xen/include/asm-x86/x86_64/system.h
> index 20f038b..b8394b9 100644
> --- a/xen/include/asm-x86/x86_64/system.h
> +++ b/xen/include/asm-x86/x86_64/system.h
> @@ -13,8 +13,10 @@
>   */
>  #define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype)                 \
>      asm volatile (                                                      \
> +        ASM_STAC"\n"                                                    \
>          "1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n"                      \
>          "2:\n"                                                          \
> +        ASM_CLAC"\n"                                                    \
>          ".section .fixup,\"ax\"\n"                                      \
>          "3:     movl $1,%1\n"                                           \
>          "       jmp 2b\n"                                               \

