[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC PATCH 4/7] x86: Set v->needs_tlb_flush when needed


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: "Teddy Astie" <teddy.astie@xxxxxxxxxx>
  • Date: Wed, 15 Apr 2026 13:32:20 +0000
  • Authentication-results: eu.smtp.expurgate.cloud; dkim=pass header.s=mte1 header.d=mandrillapp.com header.i="@mandrillapp.com" header.h="From:Subject:To:Cc:Message-Id:In-Reply-To:References:Feedback-ID:Date:MIME-Version:Content-Type:Content-Transfer-Encoding"; dkim=pass header.s=mte1 header.d=vates.tech header.i="teddy.astie@xxxxxxxxxx" header.h="From:Subject:To:Cc:Message-Id:In-Reply-To:References:Feedback-ID:Date:MIME-Version:Content-Type:Content-Transfer-Encoding"
  • Cc: "Teddy Astie" <teddy.astie@xxxxxxxxxx>, "Jan Beulich" <jbeulich@xxxxxxxx>, "Andrew Cooper" <andrew.cooper3@xxxxxxxxxx>, "Roger Pau Monné" <roger.pau@xxxxxxxxxx>, "Jason Andryuk" <jason.andryuk@xxxxxxx>, "Tim Deegan" <tim@xxxxxxx>
  • Delivery-date: Wed, 15 Apr 2026 13:32:24 +0000
  • Feedback-id: 30504962:30504962.20260415:md
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Set v->needs_tlb_flush wherever a TLB flush is expected
to be scheduled on the vCPU.

Signed-off-by: Teddy Astie <teddy.astie@xxxxxxxxxx>
---
The goal here is to avoid too much noise in [1], hence it currently
coexists with hvm_asid_flush_vcpu(), but [1] will drop
hvm_asid_flush_vcpu() and only keep needs_tlb_flush.

[1] x86/hvm: Transition to needs_tlb_flush logic, use per-domain ASID

 xen/arch/x86/flushtlb.c        | 4 ++++
 xen/arch/x86/hvm/emulate.c     | 1 +
 xen/arch/x86/hvm/hvm.c         | 1 +
 xen/arch/x86/hvm/svm/svm.c     | 5 +++++
 xen/arch/x86/hvm/vmx/vmcs.c    | 1 +
 xen/arch/x86/hvm/vmx/vmx.c     | 3 +++
 xen/arch/x86/hvm/vmx/vvmx.c    | 1 +
 xen/arch/x86/mm/p2m.c          | 4 ++++
 xen/arch/x86/mm/paging.c       | 1 +
 xen/arch/x86/mm/shadow/multi.c | 1 +
 10 files changed, 22 insertions(+)

diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c
index 23721bb52c..8ee2385bba 100644
--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -324,7 +324,11 @@ unsigned int guest_flush_tlb_flags(const struct domain *d)
 void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask)
 {
     unsigned int flags = guest_flush_tlb_flags(d);
+    struct vcpu *v;
 
     if ( flags )
         flush_mask(mask, flags);
+
+    for_each_vcpu(d, v)
+        v->needs_tlb_flush = true;
 }
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index f3aae158e9..3bc1d321cc 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2657,6 +2657,7 @@ static int cf_check hvmemul_tlb_op(
         if ( x86emul_invpcid_type(aux) != X86_INVPCID_INDIV_ADDR )
         {
             hvm_asid_flush_vcpu(current);
+            current->needs_tlb_flush = true;
             break;
         }
         aux = x86emul_invpcid_pcid(aux);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4a81afce02..0f0b0e242f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1613,6 +1613,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     struct domain *d = v->domain;
 
     hvm_asid_flush_vcpu(v);
+    v->needs_tlb_flush = true;
 
     spin_lock_init(&v->arch.hvm.tm_lock);
     INIT_LIST_HEAD(&v->arch.hvm.tm_list);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 58e927ae04..64c08432fd 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -138,6 +138,8 @@ static void cf_check svm_update_guest_cr(
         {
             if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
                 hvm_asid_flush_vcpu(v);
+                
+            v->needs_tlb_flush = true;
         }
         else if ( nestedhvm_vmswitch_in_progress(v) )
             ; /* CR3 switches during VMRUN/VMEXIT do not flush the TLB. */
@@ -944,6 +946,7 @@ static void noreturn cf_check svm_do_resume(void)
         hvm_migrate_pirqs(v);
         /* Migrating to another ASID domain.  Request a new ASID. */
         hvm_asid_flush_vcpu(v);
+        v->needs_tlb_flush = true;
     }
 
     if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) )
@@ -2306,6 +2309,8 @@ static void cf_check svm_invlpg(struct vcpu *v, unsigned 
long linear)
 {
     /* Safe fallback. Take a new ASID. */
     hvm_asid_flush_vcpu(v);
+    /* Schedule a tlb flush on the VCPU. */
+    v->needs_tlb_flush = true;
 }
 
 static bool cf_check svm_get_pending_event(
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 8e52ef4d49..4efe13e07f 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1904,6 +1904,7 @@ void cf_check vmx_do_resume(void)
         v->arch.hvm.vmx.hostenv_migrated = 1;
 
         hvm_asid_flush_vcpu(v);
+        v->needs_tlb_flush = true;
     }
 
     debug_state = v->domain->debugger_attached
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index ec0a790336..0e4f9f9c3d 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1511,6 +1511,7 @@ static void cf_check vmx_handle_cd(struct vcpu *v, 
unsigned long value)
 
             wbinvd();               /* flush possibly polluted cache */
             hvm_asid_flush_vcpu(v); /* invalidate memory type cached in TLB */
+            v->needs_tlb_flush = true; /* invalidate memory type cached in TLB 
*/
             v->arch.hvm.vmx.cache_mode = CACHE_MODE_NO_FILL;
         }
         else
@@ -1520,6 +1521,7 @@ static void cf_check vmx_handle_cd(struct vcpu *v, 
unsigned long value)
             if ( !is_iommu_enabled(v->domain) || iommu_snoop )
                 vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
             hvm_asid_flush_vcpu(v); /* no need to flush cache */
+            v->needs_tlb_flush = true;
         }
     }
 }
@@ -1872,6 +1874,7 @@ static void cf_check vmx_update_guest_cr(
 
         if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
             hvm_asid_flush_vcpu(v);
+        v->needs_tlb_flush = true;
         break;
 
     default:
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index e4cdfe55c1..16d6f1d61b 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1254,6 +1254,7 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
         if ( nvmx->guest_vpid != new_vpid )
         {
             hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
+            v->needs_tlb_flush = true;
             nvmx->guest_vpid = new_vpid;
         }
     }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index fddecdf978..910623ac93 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -25,6 +25,7 @@
 #include <asm/p2m.h>
 #include <asm/mem_sharing.h>
 #include <asm/hvm/nestedhvm.h>
+#include <asm/hvm/vcpu.h>
 #include <asm/altp2m.h>
 #include <asm/vm_event.h>
 #include <xsm/xsm.h>
@@ -1439,6 +1440,7 @@ p2m_flush(struct vcpu *v, struct p2m_domain *p2m)
     vcpu_nestedhvm(v).nv_p2m = NULL;
     p2m_flush_table(p2m);
     hvm_asid_flush_vcpu(v);
+    v->needs_tlb_flush = true;
 }
 
 void
@@ -1498,6 +1500,7 @@ static void assign_np2m(struct vcpu *v, struct p2m_domain 
*p2m)
 static void nvcpu_flush(struct vcpu *v)
 {
     hvm_asid_flush_vcpu(v);
+    v->needs_tlb_flush = true;
     vcpu_nestedhvm(v).stale_np2m = true;
 }
 
@@ -1618,6 +1621,7 @@ void np2m_schedule(int dir)
             {
                 /* This vCPU's np2m was flushed while it was not runnable */
                 hvm_asid_flush_core();
+                curr->needs_tlb_flush = true;
                 vcpu_nestedhvm(curr).nv_p2m = NULL;
             }
             else
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 2396f81ad5..b0b3bef753 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -939,6 +939,7 @@ void paging_update_nestedmode(struct vcpu *v)
         /* TODO: shadow-on-shadow */
         v->arch.paging.nestedmode = NULL;
     hvm_asid_flush_vcpu(v);
+    v->needs_tlb_flush = true;
 }
 
 int __init paging_set_allocation(struct domain *d, unsigned int pages,
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 80cd3299fa..2df2842138 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3165,6 +3165,7 @@ sh_update_linear_entries(struct vcpu *v)
      * without this change, it would fetch the wrong value due to a stale TLB.
      */
     sh_flush_local(d);
+    v->needs_tlb_flush = true;
 }
 
 static pagetable_t cf_check sh_update_cr3(struct vcpu *v, bool noflush)
-- 
2.52.0



--
Teddy Astie | Vates XCP-ng Developer

XCP-ng & Xen Orchestra - Vates solutions

web: https://vates.tech




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.