Re: [XEN PATCH v1 1/1] Invalidate cache for cpus affinitized to the domain
CCing Andy and Jan
Is restricting the cache flush to the set of CPUs bound to the domain
the right thing to do?
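
For reference, a minimal sketch of what the change amounts to. The helper
name flush_domain_cache() is invented purely for illustration; flush_mask(),
flush_all(), FLUSH_CACHE and d->dirty_cpumask are the existing interfaces
the patch itself uses.

/* Illustration only, not part of the patch. */
#include <xen/sched.h>      /* struct domain, dirty_cpumask */
#include <asm/flushtlb.h>   /* flush_mask(), flush_all(), FLUSH_CACHE */

static void flush_domain_cache(struct domain *d)
{
    /*
     * Current behaviour: flush_all(FLUSH_CACHE), i.e.
     * flush_mask(&cpu_online_map, FLUSH_CACHE) -- an IPI plus cache
     * flush on every online pCPU, regardless of where the domain runs.
     *
     * Behaviour proposed by the patch: limit the IPI and cache flush to
     * the pCPUs recorded in the domain's dirty mask.
     */
    flush_mask(d->dirty_cpumask, FLUSH_CACHE);
}
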
On Fri, 2020-12-11 at 11:44 +0000, Harsha Shamsundara Havanur wrote:
> An HVM domain flushes the cache on all CPUs using the
> `flush_all` macro, which uses cpu_online_map, during
> i) creation of a new domain,
> ii) when a device-model op is performed, and
> iii) when the domain is destroyed.
>
> This triggers an IPI on all CPUs, thus affecting other
> domains that are pinned to different pCPUs. This patch
> restricts the cache flush to the set of CPUs affinitized to
> the current domain using `domain->dirty_cpumask`.
>
> Signed-off-by: Harsha Shamsundara Havanur <havanur@xxxxxxxxxx>
> ---
> xen/arch/x86/hvm/hvm.c | 2 +-
> xen/arch/x86/hvm/mtrr.c | 6 +++---
> xen/arch/x86/hvm/svm/svm.c | 2 +-
> xen/arch/x86/hvm/vmx/vmx.c | 2 +-
> 4 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 54e32e4fe8..ec247c7010 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2219,7 +2219,7 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
> domain_pause_nosync(v->domain);
>
> /* Flush physical caches. */
> - flush_all(FLUSH_CACHE);
> + flush_mask(v->domain->dirty_cpumask, FLUSH_CACHE);
> hvm_set_uc_mode(v, 1);
>
> domain_unpause(v->domain);
> diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
> index fb051d59c3..0d804c1fa0 100644
> --- a/xen/arch/x86/hvm/mtrr.c
> +++ b/xen/arch/x86/hvm/mtrr.c
> @@ -631,7 +631,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
> break;
> /* fall through */
> default:
> - flush_all(FLUSH_CACHE);
> + flush_mask(d->dirty_cpumask, FLUSH_CACHE);
> break;
> }
> return 0;
> @@ -683,7 +683,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
> list_add_rcu(&range->list, &d->arch.hvm.pinned_cacheattr_ranges);
> p2m_memory_type_changed(d);
> if ( type != PAT_TYPE_WRBACK )
> - flush_all(FLUSH_CACHE);
> + flush_mask(d->dirty_cpumask, FLUSH_CACHE);
>
> return 0;
> }
> @@ -785,7 +785,7 @@ void memory_type_changed(struct domain *d)
> d->vcpu && d->vcpu[0] )
> {
> p2m_memory_type_changed(d);
> - flush_all(FLUSH_CACHE);
> + flush_mask(d->dirty_cpumask, FLUSH_CACHE);
> }
> }
>
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index cfea5b5523..383e763d7d 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -2395,7 +2395,7 @@ static void svm_vmexit_mce_intercept(
> static void svm_wbinvd_intercept(void)
> {
> if ( cache_flush_permitted(current->domain) )
> - flush_all(FLUSH_CACHE);
> + flush_mask(current->domain->dirty_cpumask, FLUSH_CACHE);
> }
>
> static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs,
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 86b8916a5d..a05c7036c4 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -3349,7 +3349,7 @@ static void vmx_wbinvd_intercept(void)
> return;
>
> if ( cpu_has_wbinvd_exiting )
> - flush_all(FLUSH_CACHE);
> + flush_mask(current->domain->dirty_cpumask, FLUSH_CACHE);
> else
> wbinvd();
> }