[PATCH 3/4] x86/uaccess: rework user access speculative harden guards
The current guards to select whether user accesses should be
speculatively hardened violate MISRA rule 20.7, as the UA_KEEP() macro
doesn't (and can't) parenthesize the 'args' argument.

Change the logic so the guard is implemented inside the assembly block
using the .if assembly directive.

No functional change intended.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
The guard check could be moved inside of the guest_access_mask_ptr
macro, but given its limited usage it's clearer to keep the check in
the callers IMO.
---
 xen/arch/x86/include/asm/uaccess.h | 45 +++++++++++++++---------------
 xen/arch/x86/usercopy.c            | 26 ++++++++---------
 2 files changed, 35 insertions(+), 36 deletions(-)

diff --git a/xen/arch/x86/include/asm/uaccess.h b/xen/arch/x86/include/asm/uaccess.h
index 2d01669b9610..6b8150ac221a 100644
--- a/xen/arch/x86/include/asm/uaccess.h
+++ b/xen/arch/x86/include/asm/uaccess.h
@@ -24,9 +24,6 @@ unsigned int copy_from_unsafe_ll(void *to, const void *from, unsigned int n);
 void noreturn __get_user_bad(void);
 void noreturn __put_user_bad(void);
 
-#define UA_KEEP(args...) args
-#define UA_DROP(args...)
-
 /**
  * get_guest: - Get a simple variable from guest space.
  * @x:   Variable to store result.
@@ -104,7 +101,7 @@ void noreturn __put_user_bad(void);
 #define put_unsafe(x, ptr)                                              \
 ({                                                                      \
     int err_;                                                           \
-    put_unsafe_size(x, ptr, sizeof(*(ptr)), UA_DROP, err_, -EFAULT);\
+    put_unsafe_size(x, ptr, sizeof(*(ptr)), 0, err_, -EFAULT);          \
     err_;                                                               \
 })
 
@@ -126,7 +123,7 @@ void noreturn __put_user_bad(void);
 #define get_unsafe(x, ptr)                                              \
 ({                                                                      \
     int err_;                                                           \
-    get_unsafe_size(x, ptr, sizeof(*(ptr)), UA_DROP, err_, -EFAULT);\
+    get_unsafe_size(x, ptr, sizeof(*(ptr)), 0, err_, -EFAULT);          \
     err_;                                                               \
 })
 
@@ -155,9 +152,9 @@ struct __large_struct { unsigned long buf[100]; };
  */
 #define put_unsafe_asm(x, addr, GUARD, err, itype, rtype, ltype, errret) \
     __asm__ __volatile__(                                               \
-        GUARD(                                                          \
+        ".if " STR(GUARD) "\n"                                          \
         "    guest_access_mask_ptr %[ptr], %[scr1], %[scr2]\n"          \
-        )                                                               \
+        ".endif\n"                                                      \
         "1: mov"itype" %"rtype"[val], (%[ptr])\n"                       \
         "2:\n"                                                          \
         ".section .fixup,\"ax\"\n"                                      \
@@ -165,16 +162,16 @@ struct __large_struct { unsigned long buf[100]; };
         "    jmp 2b\n"                                                  \
         ".previous\n"                                                   \
         _ASM_EXTABLE(1b, 3b)                                            \
-        : [ret] "+r" (err), [ptr] "=&r" (dummy_)                        \
-          GUARD(, [scr1] "=&r" (dummy_), [scr2] "=&r" (dummy_))         \
+        : [ret] "+r" (err), [ptr] "=&r" (dummy_),                       \
+          [scr1] "=&r" (dummy_), [scr2] "=&r" (dummy_)                  \
         : [val] ltype (x), "m" (__m(addr)),                             \
           "[ptr]" (addr), [errno] "i" (errret))
 
 #define get_unsafe_asm(x, addr, GUARD, err, rtype, ltype, errret)       \
     __asm__ __volatile__(                                               \
-        GUARD(                                                          \
+        ".if " STR(GUARD) "\n"                                          \
         "    guest_access_mask_ptr %[ptr], %[scr1], %[scr2]\n"          \
-        )                                                               \
+        ".endif\n"                                                      \
         "1: mov (%[ptr]), %"rtype"[val]\n"                              \
         "2:\n"                                                          \
         ".section .fixup,\"ax\"\n"                                      \
@@ -184,14 +181,15 @@ struct __large_struct { unsigned long buf[100]; };
         ".previous\n"                                                   \
         _ASM_EXTABLE(1b, 3b)                                            \
         : [ret] "+r" (err), [val] ltype (x),                            \
-          [ptr] "=&r" (dummy_)                                          \
-          GUARD(, [scr1] "=&r" (dummy_), [scr2] "=&r" (dummy_))         \
+          [ptr] "=&r" (dummy_),                                         \
+          [scr1] "=&r" (dummy_), [scr2] "=&r" (dummy_)                  \
         : "m" (__m(addr)), "[ptr]" (addr),                              \
           [errno] "i" (errret))
 
 #define put_unsafe_size(x, ptr, size, grd, retval, errret)              \
 do {                                                                    \
     retval = 0;                                                         \
+    BUILD_BUG_ON((grd) != 0 && (grd) != 1);                             \
     stac();                                                             \
     switch ( size )                                                     \
     {                                                                   \
@@ -214,11 +212,12 @@ do {                                                                    \
 } while ( false )
 
 #define put_guest_size(x, ptr, size, retval, errret) \
-    put_unsafe_size(x, ptr, size, UA_KEEP, retval, errret)
+    put_unsafe_size(x, ptr, size, 1, retval, errret)
 
 #define get_unsafe_size(x, ptr, size, grd, retval, errret)              \
 do {                                                                    \
     retval = 0;                                                         \
+    BUILD_BUG_ON((grd) != 0 && (grd) != 1);                             \
     stac();                                                             \
     switch ( size )                                                     \
     {                                                                   \
@@ -233,7 +232,7 @@ do {                                                                    \
 } while ( false )
 
 #define get_guest_size(x, ptr, size, retval, errret) \
-    get_unsafe_size(x, ptr, size, UA_KEEP, retval, errret)
+    get_unsafe_size(x, ptr, size, 1, retval, errret)
 
 /**
  * __copy_to_guest_pv: - Copy a block of data into guest space, with less
@@ -333,16 +332,16 @@ copy_to_unsafe(void __user *to, const void *from, unsigned int n)
         switch (n)
         {
         case 1:
-            put_unsafe_size(*(const uint8_t *)from, to, 1, UA_DROP, ret, 1);
+            put_unsafe_size(*(const uint8_t *)from, to, 1, 0, ret, 1);
             return ret;
         case 2:
-            put_unsafe_size(*(const uint16_t *)from, to, 2, UA_DROP, ret, 2);
+            put_unsafe_size(*(const uint16_t *)from, to, 2, 0, ret, 2);
             return ret;
         case 4:
-            put_unsafe_size(*(const uint32_t *)from, to, 4, UA_DROP, ret, 4);
+            put_unsafe_size(*(const uint32_t *)from, to, 4, 0, ret, 4);
             return ret;
         case 8:
-            put_unsafe_size(*(const uint64_t *)from, to, 8, UA_DROP, ret, 8);
+            put_unsafe_size(*(const uint64_t *)from, to, 8, 0, ret, 8);
             return ret;
         }
     }
 
@@ -374,16 +373,16 @@ copy_from_unsafe(void *to, const void __user *from, unsigned int n)
         switch ( n )
         {
         case 1:
-            get_unsafe_size(*(uint8_t *)to, from, 1, UA_DROP, ret, 1);
+            get_unsafe_size(*(uint8_t *)to, from, 1, 0, ret, 1);
             return ret;
         case 2:
-            get_unsafe_size(*(uint16_t *)to, from, 2, UA_DROP, ret, 2);
+            get_unsafe_size(*(uint16_t *)to, from, 2, 0, ret, 2);
             return ret;
         case 4:
-            get_unsafe_size(*(uint32_t *)to, from, 4, UA_DROP, ret, 4);
+            get_unsafe_size(*(uint32_t *)to, from, 4, 0, ret, 4);
             return ret;
         case 8:
-            get_unsafe_size(*(uint64_t *)to, from, 8, UA_DROP, ret, 8);
+            get_unsafe_size(*(uint64_t *)to, from, 8, 0, ret, 8);
             return ret;
         }
     }
 
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
index 7ab2009efe4c..d66beecc5507 100644
--- a/xen/arch/x86/usercopy.c
+++ b/xen/arch/x86/usercopy.c
@@ -11,23 +11,23 @@
 #include <asm/uaccess.h>
 
 #ifndef GUARD
-# define GUARD UA_KEEP
+# define GUARD 1
 #endif
 
 unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n)
 {
-    GUARD(unsigned dummy);
+    unsigned __maybe_unused dummy;
 
     stac();
     asm volatile (
-        GUARD(
+        ".if " STR(GUARD) "\n"
         "    guest_access_mask_ptr %[to], %q[scratch1], %q[scratch2]\n"
-        )
+        ".endif\n"
        "1: rep movsb\n"
        "2:\n"
        _ASM_EXTABLE(1b, 2b)
-        : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from)
-          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
+        : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
+          [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy)
         :: "memory" );
     clac();
 
@@ -40,9 +40,9 @@ unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int
 
     stac();
     asm volatile (
-        GUARD(
+        ".if " STR(GUARD) "\n"
         "    guest_access_mask_ptr %[from], %q[scratch1], %q[scratch2]\n"
-        )
+        ".endif\n"
        "1: rep movsb\n"
        "2:\n"
        ".section .fixup,\"ax\"\n"
@@ -56,15 +56,15 @@ unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int
        ".previous\n"
        _ASM_EXTABLE(1b, 6b)
        : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
-         [aux] "=&r" (dummy)
-         GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
+         [aux] "=&r" (dummy),
+         [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy)
        :: "memory" );
    clac();
 
    return n;
 }
 
-#if GUARD(1) + 0
+#if GUARD
 
 /**
  * copy_to_guest_pv: - Copy a block of data into PV guest space.
@@ -140,14 +140,14 @@ unsigned int copy_from_guest_pv(void *to, const void __user *from,
 }
 
 # undef GUARD
-# define GUARD UA_DROP
+# define GUARD 0
 # define copy_to_guest_ll copy_to_unsafe_ll
 # define copy_from_guest_ll copy_from_unsafe_ll
 # undef __user
 # define __user
 # include __FILE__
 
-#endif /* GUARD(1) */
+#endif /* GUARD */
 
 /*
  * Local variables:
-- 
2.46.0
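For reference, the .if trick works because STR() stringifies the guard
constant into the assembly template: the assembler itself then sees
".if 1" or ".if 0" and keeps or drops the guarded instruction at
assembly time. A minimal standalone sketch of the pattern (hypothetical,
not Xen code; the and instruction stands in for guest_access_mask_ptr,
and STR() is defined locally):

/* Hypothetical demo of guarding asm template lines with .if (x86-64). */
#include <stdio.h>

#define STR_(x) #x
#define STR(x)  STR_(x)            /* expand the macro, then stringify */

#define GUARD 1                    /* compile-time 0 or 1 */

static unsigned long mask_ptr(unsigned long ptr)
{
    __asm__ (
        ".if " STR(GUARD) "\n"     /* becomes ".if 1" or ".if 0" */
        "    and %[msk], %[ptr]\n" /* stand-in for guest_access_mask_ptr */
        ".endif\n"
        : [ptr] "+r" (ptr)
        : [msk] "r" (~0xfffUL) );  /* still allocated even when dropped */

    return ptr;
}

int main(void)
{
    printf("%#lx\n", mask_ptr(0x12345678UL)); /* 0x12345000 with GUARD 1 */
    return 0;
}

Note the trade-off this accepts: the scratch operands are now declared
unconditionally, so the compiler reserves registers for them even when
the assembler discards the guarded instruction. The BUILD_BUG_ON()
additions keep the scheme honest: .if evaluates an assembler
expression, so any grd value other than a literal 0 or 1 is rejected at
build time instead of silently producing an always-true guard.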
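The #if simplification at the end of usercopy.c also falls out of GUARD
now being a plain 0/1: with the old function-like macros, GUARD(1)
expanded to either 1 (UA_KEEP) or nothing (UA_DROP), hence the "+ 0" to
keep the preprocessor expression well formed. A toy equivalent of the
double-inclusion pattern usercopy.c uses to emit both variants from one
body of code (names invented, nothing Xen-specific):

/* demo.c - hypothetical sketch of two variants via #include __FILE__. */
#include <stdio.h>

#ifndef GUARD
# define GUARD 1                  /* first pass: hardened variant */
# define NAME  guarded_copy
#endif

/* One body of code, compiled twice with different GUARD/NAME values. */
static unsigned int NAME(unsigned int n)
{
#if GUARD
    n &= 0xff;                    /* stand-in for the pointer masking */
#endif
    return n;
}

#if GUARD                         /* true only on the first pass */

# undef GUARD
# undef NAME
# define GUARD 0                  /* second pass: unguarded variant */
# define NAME  unguarded_copy
# include __FILE__                /* re-read this file for the twin */

int main(void)
{
    printf("%u %u\n", guarded_copy(0x1234), unguarded_copy(0x1234));
    return 0;
}

#endif /* GUARD */

The second pass skips the #ifndef block and the #if GUARD tail, so the
recursion terminates after emitting the unguarded twin.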