[Xen-devel] [PATCH 1/3] x86: drop is_pv_32on64_vcpu()
... as being identical to is_pv_32bit_vcpu() after the x86-32 removal.

In a few cases this includes an additional is_pv_32bit_vcpu() ->
is_pv_32bit_domain() conversion.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -1339,7 +1339,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_m
             mctelem_cookie_t cookie = ID2COOKIE(mc_fetch.nat->fetch_id);
             mctelem_ack(which, cookie);
         } else {
-            if (!is_pv_32on64_vcpu(v)
+            if (!is_pv_32bit_vcpu(v)
                 ? guest_handle_is_null(mc_fetch.nat->data)
                 : compat_handle_is_null(mc_fetch.cmp->data))
                 return x86_mcerr("do_mca fetch: guest buffer "
@@ -1347,7 +1347,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_m

         if ((mctc = mctelem_consume_oldest_begin(which))) {
             struct mc_info *mcip = mctelem_dataptr(mctc);
-            if (!is_pv_32on64_vcpu(v)
+            if (!is_pv_32bit_vcpu(v)
                 ? copy_to_guest(mc_fetch.nat->data, mcip, 1)
                 : copy_to_compat(mc_fetch.cmp->data,
                                  mcip, 1)) {
@@ -1378,7 +1378,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_m
         mc_physcpuinfo.nat = &op->u.mc_physcpuinfo;
         nlcpu = num_online_cpus();

-        if (!is_pv_32on64_vcpu(v)
+        if (!is_pv_32bit_vcpu(v)
             ? !guest_handle_is_null(mc_physcpuinfo.nat->info)
             : !compat_handle_is_null(mc_physcpuinfo.cmp->info)) {
             if (mc_physcpuinfo.nat->ncpus <= 0)
@@ -1389,7 +1389,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_m
             if (log_cpus == NULL)
                 return x86_mcerr("do_mca cpuinfo", -ENOMEM);
             on_each_cpu(do_mc_get_cpu_info, log_cpus, 1);
-            if (!is_pv_32on64_vcpu(v)
+            if (!is_pv_32bit_vcpu(v)
                 ? copy_to_guest(mc_physcpuinfo.nat->info, log_cpus, nlcpu)
                 : copy_to_compat(mc_physcpuinfo.cmp->info,
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -496,7 +496,7 @@ int vcpu_initialise(struct vcpu *v)

 void vcpu_destroy(struct vcpu *v)
 {
-    if ( is_pv_32on64_vcpu(v) )
+    if ( is_pv_32bit_vcpu(v) )
         release_compat_l4(v);

     vcpu_destroy_fpu(v);
@@ -1705,7 +1705,7 @@ unsigned long hypercall_create_continuat
             curr->arch.hvm_vcpu.hcall_preempted = 1;

         if ( is_pv_vcpu(curr) ?
-             !is_pv_32on64_vcpu(curr) :
+             !is_pv_32bit_vcpu(curr) :
              curr->arch.hvm_vcpu.hcall_64bit )
         {
             for ( i = 0; *p != '\0'; i++ )
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2652,7 +2652,7 @@ int vcpu_destroy_pagetables(struct vcpu
     if ( rc )
         return rc;

-    if ( is_pv_32on64_vcpu(v) )
+    if ( is_pv_32bit_vcpu(v) )
     {
         l4tab = map_domain_page(mfn);
         mfn = l4e_get_pfn(*l4tab);
--- a/xen/arch/x86/trace.c
+++ b/xen/arch/x86/trace.c
@@ -11,7 +11,7 @@ void __trace_hypercall_entry(void)
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     unsigned long args[6];

-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         args[0] = regs->ebx;
         args[1] = regs->ecx;
@@ -36,7 +36,7 @@ void __trace_hypercall_entry(void)
 void __trace_pv_trap(int trapnr, unsigned long eip,
                      int use_error_code, unsigned error_code)
 {
-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         struct __packed {
             unsigned eip:32,
@@ -77,7 +77,7 @@ void __trace_pv_page_fault(unsigned long
 {
     unsigned long eip = guest_cpu_user_regs()->eip;

-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         struct __packed {
             u32 eip, addr, error_code;
@@ -108,7 +108,7 @@ void __trace_pv_page_fault(unsigned long

 void __trace_trap_one_addr(unsigned event, unsigned long va)
 {
-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         u32 d = va;
         __trace_var(event, 1, sizeof(d), &d);
@@ -123,7 +123,7 @@ void __trace_trap_one_addr(unsigned even
 void __trace_trap_two_addr(unsigned event, unsigned long va1,
                            unsigned long va2)
 {
-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         struct __packed {
             u32 va1, va2;
@@ -156,7 +156,7 @@ void __trace_ptwr_emulation(unsigned lon
      * cases, "unsigned long" is the size of a guest virtual address.
      */

-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         struct __packed {
             l1_pgentry_t pte;
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -124,7 +124,7 @@ static void show_guest_stack(struct vcpu
     if ( is_hvm_vcpu(v) )
         return;

-    if ( is_pv_32on64_vcpu(v) )
+    if ( is_pv_32bit_vcpu(v) )
     {
         compat_show_guest_stack(v, regs, debug_stack_lines);
         return;
@@ -2382,7 +2382,7 @@ static int emulate_privileged_op(struct
         {
             unsigned long mfn;

-            if ( !is_pv_32on64_vcpu(v) )
+            if ( !is_pv_32bit_domain(currd) )
             {
                 mfn = pagetable_get_pfn(v->arch.guest_table);
                 *reg = xen_pfn_to_cr3(mfn_to_gmfn(currd, mfn));
@@ -2452,7 +2452,7 @@ static int emulate_privileged_op(struct
             unsigned long gfn;
             struct page_info *page;

-            gfn = !is_pv_32on64_vcpu(v)
+            gfn = !is_pv_32bit_domain(currd)
                 ? xen_cr3_to_pfn(*reg) : compat_cr3_to_pfn(*reg);
             page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
             if ( page )
@@ -2504,19 +2504,19 @@ static int emulate_privileged_op(struct
         switch ( regs->_ecx )
         {
         case MSR_FS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             wrfsbase(msr_content);
             v->arch.pv_vcpu.fs_base = msr_content;
             break;
         case MSR_GS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             wrgsbase(msr_content);
             v->arch.pv_vcpu.gs_base_kernel = msr_content;
             break;
         case MSR_SHADOW_GS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             if ( wrmsr_safe(MSR_SHADOW_GS_BASE, msr_content) )
                 goto fail;
@@ -2675,18 +2675,18 @@ static int emulate_privileged_op(struct
         switch ( regs->_ecx )
         {
         case MSR_FS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             val = cpu_has_fsgsbase ? __rdfsbase() : v->arch.pv_vcpu.fs_base;
             goto rdmsr_writeback;
         case MSR_GS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             val = cpu_has_fsgsbase ? __rdgsbase()
                                    : v->arch.pv_vcpu.gs_base_kernel;
             goto rdmsr_writeback;
         case MSR_SHADOW_GS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             val = v->arch.pv_vcpu.gs_base_user;
             goto rdmsr_writeback;
@@ -3201,7 +3201,7 @@ void do_general_protection(struct cpu_us
             return;
         }
     }
-    else if ( is_pv_32on64_vcpu(v) && regs->error_code )
+    else if ( is_pv_32bit_vcpu(v) && regs->error_code )
    {
         emulate_gate_op(regs);
         return;
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -15,7 +15,6 @@
 #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
 #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
 #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
-#define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))

 #define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
         d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
--- a/xen/include/asm-x86/ldt.h
+++ b/xen/include/asm-x86/ldt.h
@@ -15,7 +15,7 @@ static inline void load_LDT(struct vcpu
     }
     else
     {
-        desc = (!is_pv_32on64_vcpu(v)
+        desc = (!is_pv_32bit_vcpu(v)
                 ? this_cpu(gdt_table) : this_cpu(compat_gdt_table))
               + LDT_ENTRY - FIRST_RESERVED_GDT_ENTRY;
         _set_tssldt_desc(desc, LDT_VIRT_START(v), ents*8-1, SYS_DESC_ldt);
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -285,7 +285,7 @@ static inline int tmem_get_tmemop_from_c
 #ifdef CONFIG_COMPAT
     if ( has_hvm_container_vcpu(current) ?
          hvm_guest_x86_mode(current) != 8 :
-         is_pv_32on64_vcpu(current) )
+         is_pv_32bit_vcpu(current) )
     {
         int rc;
         enum XLAT_tmem_op_u u;
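For reference, the redundancy can be seen by expanding the pre-patch
macros from xen/include/asm-x86/domain.h. The following is an
illustrative sketch only, not part of the patch:

/* Pre-patch definitions (xen/include/asm-x86/domain.h): */
#define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
#define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
#define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))

/*
 * Both vcpu-level predicates expand to the identical test:
 *
 *   is_pv_32on64_vcpu(v) -> is_pv_32on64_domain((v)->domain)
 *                        -> is_pv_32bit_domain((v)->domain)
 *                        -> ((v)->domain->arch.is_32bit_pv)
 *
 *   is_pv_32bit_vcpu(v)  -> is_pv_32bit_domain((v)->domain)
 *                        -> ((v)->domain->arch.is_32bit_pv)
 */

The is_pv_32bit_vcpu() -> is_pv_32bit_domain(currd) conversions in
emulate_privileged_op() are likewise equivalent on the assumption that
currd is v's owning domain there (as the surrounding code, e.g.
mfn_to_gmfn(currd, mfn), suggests); they merely avoid the extra
(v)->domain dereference.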
Attachment: x86-drop-is_32on64_vcpu.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel