Re: [Xen-devel] [PATCH 4/9] x86/pv: Implement pv_hypercall() in C
On 02/08/16 14:12, Jan Beulich wrote:
>>>> On 18.07.16 at 11:51, <andrew.cooper3@xxxxxxxxxx> wrote:
>> +long pv_hypercall(struct cpu_user_regs *regs)
>> +{
>> +    struct vcpu *curr = current;
>> +#ifndef NDEBUG
>> +    unsigned long old_rip = regs->rip;
>> +#endif
>> +    long ret;
>> +    uint32_t eax = regs->eax;
>> +
>> +    ASSERT(curr->arch.flags & TF_kernel_mode);
> I'm afraid TF_kernel_mode can't be relied on for 32-bit guests, so
> this needs to move into the if() below.
>
>> +    if ( (eax >= NR_hypercalls) || !hypercall_table[eax] )
>> +        return -ENOSYS;
>> +
>> +    if ( !is_pv_32bit_vcpu(curr) )
>> +    {
>> +        unsigned long rdi = regs->rdi;
>> +        unsigned long rsi = regs->rsi;
>> +        unsigned long rdx = regs->rdx;
>> +        unsigned long r10 = regs->r10;
>> +        unsigned long r8 = regs->r8;
>> +        unsigned long r9 = regs->r9;
>> +
>> +#ifndef NDEBUG
>> +        /* Deliberately corrupt parameter regs not used by this hypercall. */
>> +        switch ( hypercall_args_table[eax] )
>> +        {
>> +        case 0: rdi = 0xdeadbeefdeadf00dUL;
>> +        case 1: rsi = 0xdeadbeefdeadf00dUL;
>> +        case 2: rdx = 0xdeadbeefdeadf00dUL;
>> +        case 3: r10 = 0xdeadbeefdeadf00dUL;
>> +        case 4: r8 = 0xdeadbeefdeadf00dUL;
>> +        case 5: r9 = 0xdeadbeefdeadf00dUL;
> Without comments, aren't these going to become 5 new Coverity
> issues?
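
A minimal sketch of the annotation being asked for here (illustrative only,
not necessarily the form the committed code took): each deliberate
fall-through gets a comment that Coverity, and human readers, can recognise
as intentional.

    #ifndef NDEBUG
            /* Deliberately corrupt parameter regs not used by this hypercall. */
            switch ( hypercall_args_table[eax] )
            {
            case 0: rdi = 0xdeadbeefdeadf00dUL; /* Fallthrough */
            case 1: rsi = 0xdeadbeefdeadf00dUL; /* Fallthrough */
            case 2: rdx = 0xdeadbeefdeadf00dUL; /* Fallthrough */
            case 3: r10 = 0xdeadbeefdeadf00dUL; /* Fallthrough */
            case 4: r8  = 0xdeadbeefdeadf00dUL; /* Fallthrough */
            case 5: r9  = 0xdeadbeefdeadf00dUL;
            }
    #endif
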
>> +        }
>> +#endif
>> +        if ( unlikely(tb_init_done) )
>> +        {
>> +            unsigned long args[6] = { rdi, rsi, rdx, r10, r8, r9 };
>> +
>> +            __trace_hypercall(TRC_PV_HYPERCALL_V2, eax, args);
>> +        }
>> +
>> +        ret = hypercall_table[eax](rdi, rsi, rdx, r10, r8, r9);
>> +
>> +#ifndef NDEBUG
>> +        if ( regs->rip == old_rip )
>> +        {
>> +            /* Deliberately corrupt parameter regs used by this hypercall. */
>> +            switch ( hypercall_args_table[eax] )
>> +            {
>> +            case 6: regs->r9 = 0xdeadbeefdeadf00dUL;
>> +            case 5: regs->r8 = 0xdeadbeefdeadf00dUL;
>> +            case 4: regs->r10 = 0xdeadbeefdeadf00dUL;
>> +            case 3: regs->edx = 0xdeadbeefdeadf00dUL;
>> +            case 2: regs->esi = 0xdeadbeefdeadf00dUL;
>> +            case 1: regs->edi = 0xdeadbeefdeadf00dUL;
> For consistency with earlier code, please use rdx, rsi, and rdi here.
>
>> +#ifndef NDEBUG
>> +    if ( regs->rip == old_rip )
>> +    {
>> +        /* Deliberately corrupt parameter regs used by this hypercall. */
>> +        switch ( compat_hypercall_args_table[eax] )
>> +        {
>> +        case 6: regs->ebp = 0xdeadf00d;
>> +        case 5: regs->edi = 0xdeadf00d;
>> +        case 4: regs->esi = 0xdeadf00d;
>> +        case 3: regs->edx = 0xdeadf00d;
>> +        case 2: regs->ecx = 0xdeadf00d;
>> +        case 1: regs->ebx = 0xdeadf00d;
> Please use 32-bit stores here.
>
>> --- a/xen/arch/x86/x86_64/compat/entry.S
>> +++ b/xen/arch/x86/x86_64/compat/entry.S
>> @@ -25,70 +25,10 @@ UNLIKELY_START(ne, msi_check)
>>          LOAD_C_CLOBBERED compat=1 ax=0
>>  UNLIKELY_END(msi_check)
>>
>> -        movl  UREGS_rax(%rsp),%eax
>>          GET_CURRENT(bx)
>>
>> -        cmpl  $NR_hypercalls,%eax
>> -        jae   compat_bad_hypercall
>> -#ifndef NDEBUG
>> -        /* Deliberately corrupt parameter regs not used by this hypercall. */
>> -        pushq UREGS_rbx(%rsp); pushq %rcx; pushq %rdx; pushq %rsi; pushq %rdi
>> -        pushq UREGS_rbp+5*8(%rsp)
>> -        leaq  compat_hypercall_args_table(%rip),%r10
>> -        movl  $6,%ecx
>> -        subb  (%r10,%rax,1),%cl
>> -        movq  %rsp,%rdi
>> -        movl  $0xDEADBEEF,%eax
>> -        rep   stosq
>> -        popq  %r8 ; popq  %r9 ; xchgl %r8d,%r9d /* Args 5&6: zero extend */
>> -        popq  %rdx; popq  %rcx; xchgl %edx,%ecx /* Args 3&4: zero extend */
>> -        popq  %rdi; popq  %rsi; xchgl %edi,%esi /* Args 1&2: zero extend */
>> -        movl  UREGS_rax(%rsp),%eax
>> -        pushq %rax
>> -        pushq UREGS_rip+8(%rsp)
>> -#define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
>> -#else
>> -        /* Relocate argument registers and zero-extend to 64 bits. */
>> -        xchgl %ecx,%esi              /* Arg 2, Arg 4 */
>> -        movl  %edx,%edx              /* Arg 3 */
>> -        movl  %edi,%r8d              /* Arg 5 */
>> -        movl  %ebp,%r9d              /* Arg 6 */
>> -        movl  UREGS_rbx(%rsp),%edi   /* Arg 1 */
>> -#define SHADOW_BYTES 0  /* No on-stack shadow state */
>> -#endif
>> -        cmpb  $0,tb_init_done(%rip)
>> -UNLIKELY_START(ne, compat_trace)
>> -        call  __trace_hypercall_entry
>> -        /* Restore the registers that __trace_hypercall_entry clobbered. */
>> -        movl  UREGS_rax+SHADOW_BYTES(%rsp),%eax   /* Hypercall # */
>> -        movl  UREGS_rbx+SHADOW_BYTES(%rsp),%edi   /* Arg 1 */
>> -        movl  UREGS_rcx+SHADOW_BYTES(%rsp),%esi   /* Arg 2 */
>> -        movl  UREGS_rdx+SHADOW_BYTES(%rsp),%edx   /* Arg 3 */
>> -        movl  UREGS_rsi+SHADOW_BYTES(%rsp),%ecx   /* Arg 4 */
>> -        movl  UREGS_rdi+SHADOW_BYTES(%rsp),%r8d   /* Arg 5 */
>> -        movl  UREGS_rbp+SHADOW_BYTES(%rsp),%r9d   /* Arg 6 */
>> -#undef SHADOW_BYTES
>> -UNLIKELY_END(compat_trace)
>> -        leaq  compat_hypercall_table(%rip),%r10
>> -        PERFC_INCR(hypercalls, %rax, %rbx)
>> -        callq *(%r10,%rax,8)
>> -#ifndef NDEBUG
>> -        /* Deliberately corrupt parameter regs used by this hypercall. */
>> -        popq  %r10         # Shadow RIP
>> -        cmpq  %r10,UREGS_rip+8(%rsp)
>> -        popq  %rcx         # Shadow hypercall index
>> -        jne   compat_skip_clobber /* If RIP has changed then don't clobber. */
>> -        leaq  compat_hypercall_args_table(%rip),%r10
>> -        movb  (%r10,%rcx,1),%cl
>> -        movl  $0xDEADBEEF,%r10d
>> -        testb %cl,%cl; jz compat_skip_clobber; movl %r10d,UREGS_rbx(%rsp)
>> -        cmpb  $2, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rcx(%rsp)
>> -        cmpb  $3, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdx(%rsp)
>> -        cmpb  $4, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rsi(%rsp)
>> -        cmpb  $5, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdi(%rsp)
>> -        cmpb  $6, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rbp(%rsp)
>> -compat_skip_clobber:
>> -#endif
>> +        mov   %rsp, %rdi
>> +        call  pv_hypercall
>>          movl  %eax,UREGS_rax(%rsp)       # save the return value
> To follow the HVM model, this should also move into C.

Having tried this, I can't.

Using regs->eax = -ENOSYS; in C results in the upper 32 bits of
UREGS_rax set on the stack, and nothing else re-clobbers this.

It highlights a second bug present in the hvm side, and propagated to
the pv side.  Currently, eax gets truncated when reading out of the
registers, before it is bounds-checked against NR_hypercalls.  For the
HVM side, all this does is risk aliasing if upper bits are set, but for
the PV side, it will cause a failure for a compat guest issuing a
hypercall after previously receiving an error.
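
A standalone sketch (not Xen code) of why the C store behaves differently
from the retained asm movl, assuming the register-frame layout of the time
in which the e-named field aliases the full 64-bit register while an
_e-named field provides the genuine 32-bit view, and taking ENOSYS as 38:

    #include <stdint.h>
    #include <stdio.h>

    #define ENOSYS 38                /* assumed errno value for the sketch */

    /* Stand-in for one saved-register slot: "eax" aliases all 64 bits of
     * "rax"; "_eax" overlays only the low 32 bits (little-endian x86). */
    union reg {
        uint64_t rax, eax;
        uint32_t _eax;
    };

    int main(void)
    {
        union reg r = { .rax = 0 };

        /* What "regs->eax = -ENOSYS;" does in C: -38 is converted through
         * the 64-bit alias, so the upper 32 bits of the slot become ones. */
        r.eax = -ENOSYS;
        printf("64-bit store: rax = %#llx\n", (unsigned long long)r.rax);

        /* What "movl %eax,UREGS_rax(%rsp)" does: a 32-bit store that only
         * touches the low half, so no set upper bits are introduced. */
        r.rax = 0;
        r._eax = -ENOSYS;
        printf("32-bit store: rax = %#llx\n", (unsigned long long)r.rax);

        return 0;
    }

On the same assumption, the request above for 32-bit stores presumably means
using the _e-prefixed views (e.g. regs->_ebp = 0xdeadf00d) rather than the
64-bit e-aliases.
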
I am proposing the following change to the HVM side to compensate, and
to leave the asm adjustment of UREGS_rax in place.

~Andrew

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e2bb58a..69315d1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4132,11 +4132,11 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
     struct domain *currd = curr->domain;
     struct segment_register sreg;
     int mode = hvm_guest_x86_mode(curr);
-    uint32_t eax = regs->eax;
+    unsigned long eax = regs->eax;
 
     switch ( mode )
     {
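
For illustration, a toy comparison (not Xen code) of the two bounds-check
behaviours described above, assuming NR_hypercalls is 64; the helper names
exist only for this sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_hypercalls 64   /* assumed value for the sketch */

    /* Current behaviour: the hypercall number is truncated to 32 bits before
     * the bounds check, so stale upper bits are ignored and the low 32 bits
     * may alias a valid hypercall number. */
    static const char *check_truncated(uint64_t guest_rax)
    {
        uint32_t eax = guest_rax;
        return (eax >= NR_hypercalls) ? "-ENOSYS" : "dispatched";
    }

    /* Proposed behaviour: the full register is bounds-checked, so a value
     * with stale upper bits is rejected instead of aliasing. */
    static const char *check_full_width(uint64_t guest_rax)
    {
        unsigned long eax = guest_rax;
        return (eax >= NR_hypercalls) ? "-ENOSYS" : "dispatched";
    }

    int main(void)
    {
        uint64_t rax = 0xffffffff00000002ULL;  /* stale upper bits, low == 2 */

        printf("truncated check:  %s\n", check_truncated(rax));   /* dispatched */
        printf("full-width check: %s\n", check_full_width(rax));  /* -ENOSYS */
        return 0;
    }

With the full-width check, stale upper bits cause rejection rather than
aliasing, which is why keeping them out of a compat guest's saved rax (the
retained 32-bit movl of the return value) matters.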