Re: [Xen-devel] [PATCH v6 2/6] x86/paging: add TLB flush hooks
> -----Original Message-----
> From: Xen-devel <xen-devel-bounces@xxxxxxxxxxxxxxxxxxxx> On Behalf Of Roger
> Pau Monne
> Sent: 03 March 2020 17:21
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Wei Liu <wl@xxxxxxx>; Andrew Cooper <andrew.cooper3@xxxxxxxxxx>;
> Durrant, Paul <pdurrant@xxxxxxxxxxxx>; Tim Deegan <tim@xxxxxxx>;
> George Dunlap <george.dunlap@xxxxxxxxxx>; Jan Beulich <jbeulich@xxxxxxxx>;
> Roger Pau Monne <roger.pau@xxxxxxxxxx>
> Subject: [Xen-devel] [PATCH v6 2/6] x86/paging: add TLB flush hooks
>
> Add shadow and hap implementation specific helpers to perform guest
> TLB flushes. Note that the code for both is exactly the same at the
> moment, and is copied from hvm_flush_vcpu_tlb. This will be changed by
> further patches that will add implementation specific optimizations to
> them.
>
> No functional change intended.
>
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
For the Viridian part...
Reviewed-by: Paul Durrant <pdurrant@xxxxxxxx>
> Reviewed-by: Wei Liu <wl@xxxxxxx>
> Acked-by: Tim Deegan <tim@xxxxxxx>
> ---
> Changes since v5:
> - Make the flush tlb operation a paging_mode hook.
>
> Changes since v3:
> - Fix stray newline removal.
> - Fix return of shadow_flush_tlb dummy function.
> ---
> xen/arch/x86/hvm/hvm.c | 56 +--------------------------
> xen/arch/x86/hvm/viridian/viridian.c | 2 +-
> xen/arch/x86/mm/hap/hap.c | 58 ++++++++++++++++++++++++++++
> xen/arch/x86/mm/shadow/common.c | 55 ++++++++++++++++++++++++++
> xen/arch/x86/mm/shadow/multi.c | 1 +
> xen/arch/x86/mm/shadow/private.h | 4 ++
> xen/include/asm-x86/hvm/hvm.h | 3 --
> xen/include/asm-x86/paging.h | 10 +++++
> 8 files changed, 130 insertions(+), 59 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index db5d7b4d30..a2abad9f76 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3988,60 +3988,6 @@ static void hvm_s3_resume(struct domain *d)
> }
> }
>
> -bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
> - void *ctxt)
> -{
> - static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
> - cpumask_t *mask = &this_cpu(flush_cpumask);
> - struct domain *d = current->domain;
> - struct vcpu *v;
> -
> - /* Avoid deadlock if more than one vcpu tries this at the same time. */
> - if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
> - return false;
> -
> - /* Pause all other vcpus. */
> - for_each_vcpu ( d, v )
> - if ( v != current && flush_vcpu(ctxt, v) )
> - vcpu_pause_nosync(v);
> -
> - /* Now that all VCPUs are signalled to deschedule, we wait... */
> - for_each_vcpu ( d, v )
> - if ( v != current && flush_vcpu(ctxt, v) )
> - while ( !vcpu_runnable(v) && v->is_running )
> - cpu_relax();
> -
> - /* All other vcpus are paused, safe to unlock now. */
> - spin_unlock(&d->hypercall_deadlock_mutex);
> -
> - cpumask_clear(mask);
> -
> - /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
> - for_each_vcpu ( d, v )
> - {
> - unsigned int cpu;
> -
> - if ( !flush_vcpu(ctxt, v) )
> - continue;
> -
> - paging_update_cr3(v, false);
> -
> - cpu = read_atomic(&v->dirty_cpu);
> - if ( is_vcpu_dirty_cpu(cpu) )
> - __cpumask_set_cpu(cpu, mask);
> - }
> -
> - /* Flush TLBs on all CPUs with dirty vcpu state. */
> - flush_tlb_mask(mask);
> -
> - /* Done. */
> - for_each_vcpu ( d, v )
> - if ( v != current && flush_vcpu(ctxt, v) )
> - vcpu_unpause(v);
> -
> - return true;
> -}
> -
> static bool always_flush(void *ctxt, struct vcpu *v)
> {
> return true;
> @@ -4052,7 +3998,7 @@ static int hvmop_flush_tlb_all(void)
> if ( !is_hvm_domain(current->domain) )
> return -EINVAL;
>
> - return hvm_flush_vcpu_tlb(always_flush, NULL) ? 0 : -ERESTART;
> + return paging_flush_tlb(always_flush, NULL) ? 0 : -ERESTART;
> }
>
> static int hvmop_set_evtchn_upcall_vector(
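
The predicate-based interface is worth a quick illustration (nothing
below is part of the patch): callers pass a filter over an opaque
context, and must be prepared to retry on contention. A hypothetical
selective caller, with flush_if_set() and vcpu_bitmap invented for
illustration, would look roughly like:

    static bool flush_if_set(void *ctxt, struct vcpu *v)
    {
        const unsigned long *vcpu_bitmap = ctxt;

        /* Only flush vCPUs selected in the caller-provided bitmap. */
        return test_bit(v->vcpu_id, vcpu_bitmap);
    }

    ...
    if ( !paging_flush_tlb(flush_if_set, vcpu_bitmap) )
        return -ERESTART; /* another vCPU is mid-flush: try again later */
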
> diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
> index cd8f210198..977c1bc54f 100644
> --- a/xen/arch/x86/hvm/viridian/viridian.c
> +++ b/xen/arch/x86/hvm/viridian/viridian.c
> @@ -609,7 +609,7 @@ int viridian_hypercall(struct cpu_user_regs *regs)
> * A false return means that another vcpu is currently trying
> * a similar operation, so back off.
> */
> - if ( !hvm_flush_vcpu_tlb(need_flush, &input_params.vcpu_mask) )
> + if ( !paging_flush_tlb(need_flush, &input_params.vcpu_mask) )
> return HVM_HCALL_preempted;
>
> output.rep_complete = input.rep_count;
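
For context, the need_flush() predicate passed here is defined earlier
in viridian.c and is untouched by this diff; a mask-based predicate of
that kind looks roughly like the following sketch (not a verbatim copy):

    static bool need_flush(void *ctxt, struct vcpu *v)
    {
        uint64_t vcpu_mask = *(uint64_t *)ctxt;

        /* Flush only the vCPUs named in the guest-supplied mask. */
        return v->vcpu_id < 64 && (vcpu_mask & (1ULL << v->vcpu_id));
    }
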
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index 3d93f3451c..5616235bd8 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -669,6 +669,60 @@ static void hap_update_cr3(struct vcpu *v, int do_locking, bool noflush)
> hvm_update_guest_cr3(v, noflush);
> }
>
> +static bool flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
> + void *ctxt)
> +{
> + static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
> + cpumask_t *mask = &this_cpu(flush_cpumask);
> + struct domain *d = current->domain;
> + struct vcpu *v;
> +
> + /* Avoid deadlock if more than one vcpu tries this at the same time. */
> + if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
> + return false;
> +
> + /* Pause all other vcpus. */
> + for_each_vcpu ( d, v )
> + if ( v != current && flush_vcpu(ctxt, v) )
> + vcpu_pause_nosync(v);
> +
> + /* Now that all VCPUs are signalled to deschedule, we wait... */
> + for_each_vcpu ( d, v )
> + if ( v != current && flush_vcpu(ctxt, v) )
> + while ( !vcpu_runnable(v) && v->is_running )
> + cpu_relax();
> +
> + /* All other vcpus are paused, safe to unlock now. */
> + spin_unlock(&d->hypercall_deadlock_mutex);
> +
> + cpumask_clear(mask);
> +
> + /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
> + for_each_vcpu ( d, v )
> + {
> + unsigned int cpu;
> +
> + if ( !flush_vcpu(ctxt, v) )
> + continue;
> +
> + paging_update_cr3(v, false);
> +
> + cpu = read_atomic(&v->dirty_cpu);
> + if ( is_vcpu_dirty_cpu(cpu) )
> + __cpumask_set_cpu(cpu, mask);
> + }
> +
> + /* Flush TLBs on all CPUs with dirty vcpu state. */
> + flush_tlb_mask(mask);
> +
> + /* Done. */
> + for_each_vcpu ( d, v )
> + if ( v != current && flush_vcpu(ctxt, v) )
> + vcpu_unpause(v);
> +
> + return true;
> +}
> +
> const struct paging_mode *
> hap_paging_get_mode(struct vcpu *v)
> {
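
An aside on the handshake above: vcpu_pause_nosync() only raises the
pause count and kicks the target, so the second loop is what guarantees
every selected vCPU has actually left its pCPU before the
hypercall_deadlock_mutex is dropped. The dirty-CPU filter that follows
then limits the IPI flush to pCPUs still holding live vCPU state:

    cpu = read_atomic(&v->dirty_cpu);
    if ( is_vcpu_dirty_cpu(cpu) )     /* state still live on 'cpu'? */
        __cpumask_set_cpu(cpu, mask); /* include it in the flush IPI */
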
> @@ -781,6 +835,7 @@ static const struct paging_mode hap_paging_real_mode = {
> .update_cr3 = hap_update_cr3,
> .update_paging_modes = hap_update_paging_modes,
> .write_p2m_entry = hap_write_p2m_entry,
> + .flush_tlb = flush_tlb,
> .guest_levels = 1
> };
>
> @@ -792,6 +847,7 @@ static const struct paging_mode hap_paging_protected_mode = {
> .update_cr3 = hap_update_cr3,
> .update_paging_modes = hap_update_paging_modes,
> .write_p2m_entry = hap_write_p2m_entry,
> + .flush_tlb = flush_tlb,
> .guest_levels = 2
> };
>
> @@ -803,6 +859,7 @@ static const struct paging_mode hap_paging_pae_mode = {
> .update_cr3 = hap_update_cr3,
> .update_paging_modes = hap_update_paging_modes,
> .write_p2m_entry = hap_write_p2m_entry,
> + .flush_tlb = flush_tlb,
> .guest_levels = 3
> };
>
> @@ -814,6 +871,7 @@ static const struct paging_mode hap_paging_long_mode = {
> .update_cr3 = hap_update_cr3,
> .update_paging_modes = hap_update_paging_modes,
> .write_p2m_entry = hap_write_p2m_entry,
> + .flush_tlb = flush_tlb,
> .guest_levels = 4
> };
>
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index cba3ab1eba..121ddf1255 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -3357,6 +3357,61 @@ out:
> return rc;
> }
>
> +/* Flush the TLB of the selected vCPUs. */
> +bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
> + void *ctxt)
> +{
> + static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
> + cpumask_t *mask = &this_cpu(flush_cpumask);
> + struct domain *d = current->domain;
> + struct vcpu *v;
> +
> + /* Avoid deadlock if more than one vcpu tries this at the same time. */
> + if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
> + return false;
> +
> + /* Pause all other vcpus. */
> + for_each_vcpu ( d, v )
> + if ( v != current && flush_vcpu(ctxt, v) )
> + vcpu_pause_nosync(v);
> +
> + /* Now that all VCPUs are signalled to deschedule, we wait... */
> + for_each_vcpu ( d, v )
> + if ( v != current && flush_vcpu(ctxt, v) )
> + while ( !vcpu_runnable(v) && v->is_running )
> + cpu_relax();
> +
> + /* All other vcpus are paused, safe to unlock now. */
> + spin_unlock(&d->hypercall_deadlock_mutex);
> +
> + cpumask_clear(mask);
> +
> + /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
> + for_each_vcpu ( d, v )
> + {
> + unsigned int cpu;
> +
> + if ( !flush_vcpu(ctxt, v) )
> + continue;
> +
> + paging_update_cr3(v, false);
> +
> + cpu = read_atomic(&v->dirty_cpu);
> + if ( is_vcpu_dirty_cpu(cpu) )
> + __cpumask_set_cpu(cpu, mask);
> + }
> +
> + /* Flush TLBs on all CPUs with dirty vcpu state. */
> + flush_tlb_mask(mask);
> +
> + /* Done. */
> + for_each_vcpu ( d, v )
> + if ( v != current && flush_vcpu(ctxt, v) )
> + vcpu_unpause(v);
> +
> + return true;
> +}
> +
> /**************************************************************************/
> /* Shadow-control XEN_DOMCTL dispatcher */
>
> diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
> index 26798b317c..b6afc0fba4 100644
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -4873,6 +4873,7 @@ const struct paging_mode sh_paging_mode = {
> .update_cr3 = sh_update_cr3,
> .update_paging_modes = shadow_update_paging_modes,
> .write_p2m_entry = shadow_write_p2m_entry,
> + .flush_tlb = shadow_flush_tlb,
> .guest_levels = GUEST_PAGING_LEVELS,
> .shadow.detach_old_tables = sh_detach_old_tables,
> #ifdef CONFIG_PV
> diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
> index 3217777921..e8b028a365 100644
> --- a/xen/arch/x86/mm/shadow/private.h
> +++ b/xen/arch/x86/mm/shadow/private.h
> @@ -814,6 +814,10 @@ static inline int sh_check_page_has_no_refs(struct page_info *page)
> ((count & PGC_allocated) ? 1 : 0) );
> }
>
> +/* Flush the TLB of the selected vCPUs. */
> +bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
> + void *ctxt);
> +
> #endif /* _XEN_SHADOW_PRIVATE_H */
>
> /*
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 24da824cbf..aae00a7860 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -334,9 +334,6 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
> signed int cr0_pg);
> unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);
>
> -bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
> - void *ctxt);
> -
> int hvm_copy_context_and_params(struct domain *src, struct domain *dst);
>
> #ifdef CONFIG_HVM
> diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
> index 7544f73121..051161481c 100644
> --- a/xen/include/asm-x86/paging.h
> +++ b/xen/include/asm-x86/paging.h
> @@ -140,6 +140,9 @@ struct paging_mode {
> unsigned long gfn,
> l1_pgentry_t *p, l1_pgentry_t new,
> unsigned int level);
> + bool (*flush_tlb)(bool (*flush_vcpu)(void *ctxt,
> + struct vcpu *v),
> + void *ctxt);
>
> unsigned int guest_levels;
>
> @@ -397,6 +400,13 @@ static always_inline unsigned int paging_max_paddr_bits(const struct domain *d)
> return bits;
> }
>
> +static inline bool paging_flush_tlb(bool (*flush_vcpu)(void *ctxt,
> + struct vcpu *v),
> + void *ctxt)
> +{
> + return paging_get_hostmode(current)->flush_tlb(flush_vcpu, ctxt);
> +}
> +
> #endif /* XEN_PAGING_H */
>
> /*
> --
> 2.25.0
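
One nice property of making this a paging_mode hook: a future paging
implementation only has to supply its own callback. Hypothetical sketch
(my_flush_tlb and my_paging_mode are invented for illustration):

    static bool my_flush_tlb(bool (*flush_vcpu)(void *ctxt,
                                                struct vcpu *v),
                             void *ctxt);

    static const struct paging_mode my_paging_mode = {
        /* ... other hooks ... */
        .flush_tlb = my_flush_tlb,
    };

and every existing caller of paging_flush_tlb() picks it up via
paging_get_hostmode(current) with no further changes.
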
>
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel