Re: [Xen-devel] [PATCH v3 01/16] x86: change name of parameter for various invlpg functions
On 09/04/2018 05:15 PM, Wei Liu wrote:
> They all incorrectly named a parameter "virtual address" when it should
> have been "linear address".
>
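For readers less familiar with the x86 terminology: a virtual address
(segment:offset) becomes a linear address only after segmentation, and it
is the linear address that INVLPG/INVLPGA and the TLB operate on, hence
the rename. A minimal sketch of the distinction, using hypothetical names
rather than Xen's actual helpers:

    #include <stdint.h>

    /*
     * Illustrative only: the x86 address pipeline is
     *   virtual (segment:offset) -> linear (after segmentation)
     *                            -> physical (after paging),
     * and INVLPG/INVLPGA take the *linear* address.
     */
    static inline uint64_t virt_to_linear(uint64_t seg_base, uint64_t offset)
    {
        /*
         * In 64-bit mode the CS/DS/ES/SS bases are treated as 0, so
         * linear == offset for those segments; FS and GS may still
         * carry a non-zero base.
         */
        return seg_base + offset;
    }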
> Requested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
> Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
> Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
> Acked-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
> ---
> xen/arch/x86/hvm/svm/svm.c | 14 +++++++-------
> xen/arch/x86/hvm/vmx/vmx.c | 12 ++++++------
> xen/arch/x86/mm.c | 10 +++++-----
> xen/arch/x86/mm/hap/hap.c | 2 +-
> xen/arch/x86/mm/shadow/multi.c | 14 +++++++-------
> xen/arch/x86/mm/shadow/none.c | 2 +-
> xen/include/asm-x86/hvm/hvm.h | 6 +++---
> xen/include/asm-x86/hvm/svm/asid.h | 4 ++--
> xen/include/asm-x86/hvm/svm/svm.h | 4 ++--
> xen/include/asm-x86/paging.h | 3 ++-
> 10 files changed, 36 insertions(+), 35 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 0b06e2ff11..34d55b4938 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -2488,18 +2488,18 @@ static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
> }
>
> static void svm_invlpga_intercept(
> - struct vcpu *v, unsigned long vaddr, uint32_t asid)
> + struct vcpu *v, unsigned long linear, uint32_t asid)
> {
> - svm_invlpga(vaddr,
> + svm_invlpga(linear,
> (asid == 0)
> ? v->arch.hvm.n1asid.asid
> : vcpu_nestedhvm(v).nv_n2asid.asid);
> }
>
> -static void svm_invlpg_intercept(unsigned long vaddr)
> +static void svm_invlpg_intercept(unsigned long linear)
> {
> - HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
> - paging_invlpg(current, vaddr);
> + HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(linear));
> + paging_invlpg(current, linear);
> }
>
> static bool is_invlpg(const struct x86_emulate_state *state,
> @@ -2512,9 +2512,9 @@ static bool is_invlpg(const struct x86_emulate_state *state,
> (ext & 7) == 7;
> }
>
> -static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
> +static void svm_invlpg(struct vcpu *v, unsigned long linear)
> {
> - svm_asid_g_invlpg(v, vaddr);
> + svm_asid_g_invlpg(v, linear);
> }
>
> static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index e926b0b28e..b2e1a28038 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -75,7 +75,7 @@ static void vmx_wbinvd_intercept(void);
> static void vmx_fpu_dirty_intercept(void);
> static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
> static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
> -static void vmx_invlpg(struct vcpu *v, unsigned long vaddr);
> +static void vmx_invlpg(struct vcpu *v, unsigned long linear);
>
> /* Values for domain's ->arch.hvm_domain.pi_ops.flags. */
> #define PI_CSW_FROM (1u << 0)
> @@ -2595,16 +2595,16 @@ static void vmx_dr_access(unsigned long exit_qualification,
> vmx_update_cpu_exec_control(v);
> }
>
> -static void vmx_invlpg_intercept(unsigned long vaddr)
> +static void vmx_invlpg_intercept(unsigned long linear)
> {
> - HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
> - paging_invlpg(current, vaddr);
> + HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(linear));
> + paging_invlpg(current, linear);
> }
>
> -static void vmx_invlpg(struct vcpu *v, unsigned long vaddr)
> +static void vmx_invlpg(struct vcpu *v, unsigned long linear)
> {
> if ( cpu_has_vmx_vpid )
> - vpid_sync_vcpu_gva(v, vaddr);
> + vpid_sync_vcpu_gva(v, linear);
> }
>
> static int vmx_vmfunc_intercept(struct cpu_user_regs *regs)
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 84979f28d5..409814ce0a 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5793,19 +5793,19 @@ const unsigned long *__init get_platform_badpages(unsigned int *array_size)
> return bad_pages;
> }
>
> -void paging_invlpg(struct vcpu *v, unsigned long va)
> +void paging_invlpg(struct vcpu *v, unsigned long linear)
> {
> - if ( !is_canonical_address(va) )
> + if ( !is_canonical_address(linear) )
> return;
>
> if ( paging_mode_enabled(v->domain) &&
> - !paging_get_hostmode(v)->invlpg(v, va) )
> + !paging_get_hostmode(v)->invlpg(v, linear) )
> return;
>
> if ( is_pv_vcpu(v) )
> - flush_tlb_one_local(va);
> + flush_tlb_one_local(linear);
> else
> - hvm_invlpg(v, va);
> + hvm_invlpg(v, linear);
> }
>
> /* Build a 32bit PSE page table using 4MB pages. */
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index c53d76cf69..3d651b94c3 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -650,7 +650,7 @@ static int hap_page_fault(struct vcpu *v, unsigned long va,
> * should not be intercepting it. However, we need to correctly handle
> * getting here from instruction emulation.
> */
> -static bool_t hap_invlpg(struct vcpu *v, unsigned long va)
> +static bool_t hap_invlpg(struct vcpu *v, unsigned long linear)
> {
> /*
> * Emulate INVLPGA:
> diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
> index 7bb6f47155..bba573ae87 100644
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -3549,7 +3549,7 @@ propagate:
> * instruction should be issued on the hardware, or false if it's safe not
> * to do so.
> */
> -static bool sh_invlpg(struct vcpu *v, unsigned long va)
> +static bool sh_invlpg(struct vcpu *v, unsigned long linear)
> {
> mfn_t sl1mfn;
> shadow_l2e_t sl2e;
> @@ -3572,14 +3572,14 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
> {
> shadow_l3e_t sl3e;
> if ( !(shadow_l4e_get_flags(
> - sh_linear_l4_table(v)[shadow_l4_linear_offset(va)])
> + sh_linear_l4_table(v)[shadow_l4_linear_offset(linear)])
> & _PAGE_PRESENT) )
> return false;
> /* This must still be a copy-from-user because we don't have the
> * paging lock, and the higher-level shadows might disappear
> * under our feet. */
> if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v)
> - + shadow_l3_linear_offset(va)),
> + + shadow_l3_linear_offset(linear)),
> sizeof (sl3e)) != 0 )
> {
> perfc_incr(shadow_invlpg_fault);
> @@ -3589,7 +3589,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
> return false;
> }
> #else /* SHADOW_PAGING_LEVELS == 3 */
> - if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
> + if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(linear)])
> & _PAGE_PRESENT) )
> // no need to flush anything if there's no SL2...
> return false;
> @@ -3598,7 +3598,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
> /* This must still be a copy-from-user because we don't have the shadow
> * lock, and the higher-level shadows might disappear under our feet. */
> if ( __copy_from_user(&sl2e,
> - sh_linear_l2_table(v) + shadow_l2_linear_offset(va),
> + sh_linear_l2_table(v) + shadow_l2_linear_offset(linear),
> sizeof (sl2e)) != 0 )
> {
> perfc_incr(shadow_invlpg_fault);
> @@ -3642,7 +3642,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
> * feet. */
> if ( __copy_from_user(&sl2e,
> sh_linear_l2_table(v)
> - + shadow_l2_linear_offset(va),
> + + shadow_l2_linear_offset(linear),
> sizeof (sl2e)) != 0 )
> {
> perfc_incr(shadow_invlpg_fault);
> @@ -3664,7 +3664,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
> && page_is_out_of_sync(pg) ) )
> {
> shadow_l1e_t *sl1;
> - sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(va);
> + sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(linear);
> /* Remove the shadow entry that maps this VA */
> (void) shadow_set_l1e(d, sl1, shadow_l1e_empty(),
> p2m_invalid, sl1mfn);
> diff --git a/xen/arch/x86/mm/shadow/none.c b/xen/arch/x86/mm/shadow/none.c
> index a8c9604cdf..4de645a433 100644
> --- a/xen/arch/x86/mm/shadow/none.c
> +++ b/xen/arch/x86/mm/shadow/none.c
> @@ -37,7 +37,7 @@ static int _page_fault(struct vcpu *v, unsigned long va,
> return 0;
> }
>
> -static bool _invlpg(struct vcpu *v, unsigned long va)
> +static bool _invlpg(struct vcpu *v, unsigned long linear)
> {
> ASSERT_UNREACHABLE();
> return true;
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 132e62b4f6..6b0e088750 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -160,7 +160,7 @@ struct hvm_function_table {
>
> int (*event_pending)(struct vcpu *v);
> bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
> - void (*invlpg)(struct vcpu *v, unsigned long vaddr);
> + void (*invlpg)(struct vcpu *v, unsigned long linear);
>
> int (*cpu_up_prepare)(unsigned int cpu);
> void (*cpu_dead)(unsigned int cpu);
> @@ -454,9 +454,9 @@ static inline int hvm_event_pending(struct vcpu *v)
> return hvm_funcs.event_pending(v);
> }
>
> -static inline void hvm_invlpg(struct vcpu *v, unsigned long va)
> +static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
> {
> - hvm_funcs.invlpg(v, va);
> + hvm_funcs.invlpg(v, linear);
> }
>
> /* These bits in CR4 are owned by the host. */
> diff --git a/xen/include/asm-x86/hvm/svm/asid.h b/xen/include/asm-x86/hvm/svm/asid.h
> index 60cbb7b881..0e5ec3ab78 100644
> --- a/xen/include/asm-x86/hvm/svm/asid.h
> +++ b/xen/include/asm-x86/hvm/svm/asid.h
> @@ -25,11 +25,11 @@
> void svm_asid_init(const struct cpuinfo_x86 *c);
> void svm_asid_handle_vmrun(void);
>
> -static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
> +static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_linear)
> {
> #if 0
> /* Optimization? */
> - svm_invlpga(g_vaddr, v->arch.hvm.svm.vmcb->guest_asid);
> + svm_invlpga(g_linear, v->arch.hvm.svm.vmcb->guest_asid);
> #endif
>
> /* Safe fallback. Take a new ASID. */
> diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
> index 4e5e142910..8166046a6d 100644
> --- a/xen/include/asm-x86/hvm/svm/svm.h
> +++ b/xen/include/asm-x86/hvm/svm/svm.h
> @@ -40,13 +40,13 @@ static inline void svm_vmsave_pa(paddr_t vmcb)
> : : "a" (vmcb) : "memory" );
> }
>
> -static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
> +static inline void svm_invlpga(unsigned long linear, uint32_t asid)
> {
> asm volatile (
> ".byte 0x0f,0x01,0xdf"
> : /* output */
> : /* input */
> - "a" (vaddr), "c" (asid));
> + "a" (linear), "c" (asid));
> }
>
> unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
> diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
> index f440e3e53c..b51e1709d3 100644
> --- a/xen/include/asm-x86/paging.h
> +++ b/xen/include/asm-x86/paging.h
> @@ -110,7 +110,8 @@ struct shadow_paging_mode {
> struct paging_mode {
> int (*page_fault )(struct vcpu *v, unsigned long va,
> struct cpu_user_regs *regs);
> - bool (*invlpg )(struct vcpu *v, unsigned long va);
> + bool (*invlpg )(struct vcpu *v,
> + unsigned long linear);
> unsigned long (*gva_to_gfn )(struct vcpu *v,
> struct p2m_domain *p2m,
> unsigned long va,
>
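As context for the paging_invlpg() hunk above: the early return for a
non-canonical address reflects the architectural rule that the upper bits
of a 64-bit linear address must be sign-extensions of bit 47. A minimal
sketch of such a check, assuming 48-bit linear addresses and using an
illustrative name rather than Xen's is_canonical_address():

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * True iff sign-extending the low 48 bits reproduces the value,
     * i.e. bits 63:47 are all copies of bit 47.
     */
    static bool is_canonical_48(uint64_t linear)
    {
        return ((int64_t)(linear << 16) >> 16) == (int64_t)linear;
    }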
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel