
Re: [PATCH 5/5] x86: don't build unused entry code when !PV32



On 28.12.2020 16:30, Roger Pau Monné wrote:
> On Wed, Nov 25, 2020 at 09:51:33AM +0100, Jan Beulich wrote:
>> --- a/xen/arch/x86/x86_64/compat/entry.S
>> +++ b/xen/arch/x86/x86_64/compat/entry.S
>> @@ -29,8 +29,6 @@ ENTRY(entry_int82)
>>          mov   %rsp, %rdi
>>          call  do_entry_int82
>>  
>> -#endif /* CONFIG_PV32 */
>> -
>>  /* %rbx: struct vcpu */
>>  ENTRY(compat_test_all_events)
>>          ASSERT_NOT_IN_ATOMIC
>> @@ -197,6 +195,8 @@ ENTRY(cr4_pv32_restore)
>>          xor   %eax, %eax
>>          ret
>>  
>> +#endif /* CONFIG_PV32 */
> 
> I've also wondered about this: it feels odd to add CONFIG_PV32 gates to
> the compat entry.S, since that file is supposed to be used only when
> there's support for 32-bit PV guests.
> 
> Wouldn't this file only get built when such support is enabled?

No. We need cstar_enter even for 64-bit guests, whose 32-bit user
space may make system calls via SYSCALL.
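
As a rough illustration only (a sketch, not the actual Xen code; the
wrapper name hypothetical_setup_syscall_msrs is made up for this
example): the CSTAR MSR is what routes SYSCALL from 32-bit user mode
to cstar_enter, and that applies to a 64-bit PV guest's compat user
space regardless of CONFIG_PV32.

    #include <asm/msr.h>  /* assumed: Xen's wrmsrl(), MSR_LSTAR, MSR_CSTAR */

    /* Hypothetical helper, for illustration only. */
    static void hypothetical_setup_syscall_msrs(void)
    {
        extern void lstar_enter(void);  /* SYSCALL from 64-bit user mode   */
        extern void cstar_enter(void);  /* SYSCALL from 32-bit user mode;
                                         * a 64-bit PV guest's 32-bit user
                                         * space can still end up here     */

        wrmsrl(MSR_LSTAR, (unsigned long)lstar_enter);
        wrmsrl(MSR_CSTAR, (unsigned long)cstar_enter);
    }

So compat/entry.S still has to be assembled when PV32=n; only the
parts reachable exclusively by 32-bit PV guests can be compiled out.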

>> --- a/xen/arch/x86/x86_64/entry.S
>> +++ b/xen/arch/x86/x86_64/entry.S
>> @@ -328,8 +328,10 @@ UNLIKELY_END(sysenter_gpf)
>>          movq  VCPU_domain(%rbx),%rdi
>>          movq  %rax,TRAPBOUNCE_eip(%rdx)
>>          movb  %cl,TRAPBOUNCE_flags(%rdx)
>> +#ifdef CONFIG_PV32
>>          cmpb  $0, DOMAIN_is_32bit_pv(%rdi)
>>          jne   compat_sysenter
>> +#endif
>>          jmp   .Lbounce_exception
>>  
>>  ENTRY(int80_direct_trap)
>> @@ -370,6 +372,7 @@ UNLIKELY_END(msi_check)
>>          mov    0x80 * TRAPINFO_sizeof + TRAPINFO_eip(%rsi), %rdi
>>          movzwl 0x80 * TRAPINFO_sizeof + TRAPINFO_cs (%rsi), %ecx
>>  
>> +#ifdef CONFIG_PV32
>>          mov   %ecx, %edx
>>          and   $~3, %edx
>>  
>> @@ -378,6 +381,10 @@ UNLIKELY_END(msi_check)
>>  
>>          test  %rdx, %rdx
>>          jz    int80_slow_path
>> +#else
>> +        test  %rdi, %rdi
>> +        jz    int80_slow_path
>> +#endif
>>  
>>          /* Construct trap_bounce from trap_ctxt[0x80]. */
>>          lea   VCPU_trap_bounce(%rbx), %rdx
>> @@ -390,8 +397,10 @@ UNLIKELY_END(msi_check)
>>          lea   (, %rcx, TBF_INTERRUPT), %ecx
>>          mov   %cl, TRAPBOUNCE_flags(%rdx)
>>  
>> +#ifdef CONFIG_PV32
>>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>>          jne   compat_int80_direct_trap
>> +#endif
>>  
>>          call  create_bounce_frame
>>          jmp   test_all_events
>> @@ -541,12 +550,16 @@ ENTRY(dom_crash_sync_extable)
>>          GET_STACK_END(ax)
>>          leaq  STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
>>          # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
>> +#ifdef CONFIG_PV32
>>          movq  STACK_CPUINFO_FIELD(current_vcpu)(%rax), %rax
>>          movq  VCPU_domain(%rax),%rax
>>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>>          sete  %al
>>          leal  (%rax,%rax,2),%eax
>>          orb   %al,UREGS_cs(%rsp)
>> +#else
>> +        orb   $3, UREGS_cs(%rsp)
>> +#endif
>>          xorl  %edi,%edi
>>          jmp   asm_domain_crash_synchronous /* Does not return */
>>          .popsection
>> @@ -562,11 +575,15 @@ ENTRY(ret_from_intr)
>>          GET_CURRENT(bx)
>>          testb $3, UREGS_cs(%rsp)
>>          jz    restore_all_xen
>> +#ifdef CONFIG_PV32
>>          movq  VCPU_domain(%rbx), %rax
>>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>>          je    test_all_events
>>          jmp   compat_test_all_events
>>  #else
>> +        jmp   test_all_events
>> +#endif
>> +#else
>>          ASSERT_CONTEXT_IS_XEN
>>          jmp   restore_all_xen
>>  #endif
>> @@ -652,7 +669,7 @@ handle_exception_saved:
>>          testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
>>          jz    exception_with_ints_disabled
>>  
>> -#ifdef CONFIG_PV
>> +#if defined(CONFIG_PV32)
>>          ALTERNATIVE_2 "jmp .Lcr4_pv32_done", \
>>              __stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMEP, \
>>              __stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMAP
>> @@ -692,7 +709,7 @@ handle_exception_saved:
>>          test  $~(PFEC_write_access|PFEC_insn_fetch),%eax
>>          jz    compat_test_all_events
>>  .Lcr4_pv32_done:
>> -#else
>> +#elif !defined(CONFIG_PV)
>>          ASSERT_CONTEXT_IS_XEN
>>  #endif /* CONFIG_PV */
>>          sti
>> @@ -711,9 +728,11 @@ handle_exception_saved:
>>  #ifdef CONFIG_PV
>>          testb $3,UREGS_cs(%rsp)
>>          jz    restore_all_xen
>> +#ifdef CONFIG_PV32
>>          movq  VCPU_domain(%rbx),%rax
>>          cmpb  $0, DOMAIN_is_32bit_pv(%rax)
>>          jne   compat_test_all_events
>> +#endif
>>          jmp   test_all_events
>>  #else
>>          ASSERT_CONTEXT_IS_XEN
>> @@ -947,11 +966,16 @@ handle_ist_exception:
>>          je    1f
>>          movl  $EVENT_CHECK_VECTOR,%edi
>>          call  send_IPI_self
>> -1:      movq  VCPU_domain(%rbx),%rax
>> +1:
>> +#ifdef CONFIG_PV32
>> +        movq  VCPU_domain(%rbx),%rax
>>          cmpb  $0,DOMAIN_is_32bit_pv(%rax)
>>          je    restore_all_guest
>>          jmp   compat_restore_all_guest
>>  #else
>> +        jmp   restore_all_guest
>> +#endif
>> +#else
>>          ASSERT_CONTEXT_IS_XEN
>>          jmp   restore_all_xen
>>  #endif
> 
> I would like to have Andrew's opinion on this one (as you and he tend
> to modify more asm code than I do). There are quite a lot of
> additions to the assembly code, and IMO they make it more complex,
> which I think we should try to avoid, as assembly is already hard
> enough.

Well, while I can see your point (and I indeed asked myself the same
question when making this change), this merely follows the route
started with the addition of the CONFIG_PV conditionals. If we think
that prior step didn't set a good precedent, we ought to undo it.
Otherwise I see no good argument against doing the same kind of
transformation a second time (and further times, if need be, down the
road).
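
For illustration (a sketch of the existing layering, not code taken
from this patch; it relies on the Kconfig fact that PV32 depends on
PV), the guards nest rather than replace one another:

    /* Illustrative only: how the conditionals layer in the entry code. */
    #ifdef CONFIG_PV
        /* paths needed for any PV guest, 64-bit or 32-bit */
    # ifdef CONFIG_PV32
        /* paths needed only when 32-bit PV guests are supported */
    # endif
    #else
        /* no PV at all: e.g. the ASSERT_CONTEXT_IS_XEN-only paths */
    #endif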

Jan



 

