
[PATCH 5/5] x86/HVM: limit cache writeback overhead


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Wed, 19 Apr 2023 12:46:56 +0200
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Kevin Tian <kevin.tian@xxxxxxxxx>, Jun Nakajima <jun.nakajima@xxxxxxxxx>
  • Delivery-date: Wed, 19 Apr 2023 10:47:01 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

There's no need to write back caches on all CPUs upon seeing a WBINVD
exit; CPUs that the vCPU hasn't run on since the last writeback (or
since it was started) can't hold data which may need writing back.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
With us not running AMD IOMMUs in non-coherent ways, I wonder whether
svm_wbinvd_intercept() really needs to do anything. Alternatively it
could check iommu_snoop just like VMX does, knowing that as of
c609108b2190 ["x86/shadow: make iommu_snoop usage consistent with
HAP's"] that's always set; the check would then largely serve as grep
fodder, to make sure this code is updated once/when we do away with
this global variable, and it would be the penultimate step towards
being able to fold SVM's and VT-x's functions.
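
Not part of the patch, but to illustrate the tracking scheme in
isolation: below is a minimal, self-contained sketch in standalone C,
using a plain unsigned long in place of cpumask_var_t and printf() in
place of the actual cache flush; all names here are made up for the
example.

#include <stdio.h>

#define NR_CPUS 8

struct vcpu_sketch {
    unsigned int processor;          /* CPU the vCPU currently runs on */
    unsigned long cache_dirty_mask;  /* CPUs whose caches may hold its data */
};

/* Called whenever the vCPU is about to run on a (possibly new) CPU. */
static void note_resume(struct vcpu_sketch *v, unsigned int cpu)
{
    v->processor = cpu;
    v->cache_dirty_mask |= 1UL << cpu;
}

/* Called on a WBINVD intercept: write back only where it may matter. */
static void wbinvd_intercept(struct vcpu_sketch *v)
{
    for ( unsigned int cpu = 0; cpu < NR_CPUS; ++cpu )
        if ( v->cache_dirty_mask & (1UL << cpu) )
            printf("write back caches on CPU %u\n", cpu);

    /* Afterwards only the CPU we're running on can hold dirty data. */
    v->cache_dirty_mask = 1UL << v->processor;
}

int main(void)
{
    struct vcpu_sketch v = { .processor = 2, .cache_dirty_mask = 1UL << 2 };

    note_resume(&v, 5);
    wbinvd_intercept(&v);   /* writes back on CPUs 2 and 5 only */
    wbinvd_intercept(&v);   /* writes back on CPU 5 only */
    return 0;
}

The patch proper keeps the mask per vCPU (v->arch.hvm.cache_dirty_mask),
sets the current CPU's bit in hvm_do_resume(), and resets the mask to
just the current CPU after flush_mask() in the WBINVD intercepts.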

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -537,6 +537,8 @@ void hvm_do_resume(struct vcpu *v)
         v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
     }
 
+    __cpumask_set_cpu(v->processor, v->arch.hvm.cache_dirty_mask);
+
     if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
     {
         struct x86_event info;
@@ -1592,6 +1594,10 @@ int hvm_vcpu_initialise(struct vcpu *v)
     if ( rc )
         goto fail6;
 
+    rc = -ENOMEM;
+    if ( !zalloc_cpumask_var(&v->arch.hvm.cache_dirty_mask) )
+        goto fail6;
+
     rc = ioreq_server_add_vcpu_all(d, v);
     if ( rc != 0 )
         goto fail6;
@@ -1621,6 +1627,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     hvm_vcpu_cacheattr_destroy(v);
  fail1:
     viridian_vcpu_deinit(v);
+    FREE_CPUMASK_VAR(v->arch.hvm.cache_dirty_mask);
     return rc;
 }
 
@@ -1628,6 +1635,8 @@ void hvm_vcpu_destroy(struct vcpu *v)
 {
     viridian_vcpu_deinit(v);
 
+    FREE_CPUMASK_VAR(v->arch.hvm.cache_dirty_mask);
+
     ioreq_server_remove_vcpu_all(v->domain, v);
 
     if ( hvm_altp2m_supported() )
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2363,8 +2363,14 @@ static void svm_vmexit_mce_intercept(
 
 static void cf_check svm_wbinvd_intercept(void)
 {
-    if ( cache_flush_permitted(current->domain) )
-        flush_all(FLUSH_WRITEBACK);
+    struct vcpu *curr = current;
+
+    if ( !cache_flush_permitted(curr->domain) )
+        return;
+
+    flush_mask(curr->arch.hvm.cache_dirty_mask, FLUSH_WRITEBACK);
+    cpumask_copy(curr->arch.hvm.cache_dirty_mask,
+                 cpumask_of(curr->processor));
 }
 
 static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs,
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3710,11 +3710,17 @@ static void vmx_do_extint(struct cpu_use
 
 static void cf_check vmx_wbinvd_intercept(void)
 {
-    if ( !cache_flush_permitted(current->domain) || iommu_snoop )
+    struct vcpu *curr = current;
+
+    if ( !cache_flush_permitted(curr->domain) || iommu_snoop )
         return;
 
     if ( cpu_has_wbinvd_exiting )
-        flush_all(FLUSH_WRITEBACK);
+    {
+        flush_mask(curr->arch.hvm.cache_dirty_mask, FLUSH_WRITEBACK);
+        cpumask_copy(curr->arch.hvm.cache_dirty_mask,
+                     cpumask_of(curr->processor));
+    }
     else
         wbnoinvd();
 }
--- a/xen/arch/x86/include/asm/hvm/vcpu.h
+++ b/xen/arch/x86/include/asm/hvm/vcpu.h
@@ -161,6 +161,8 @@ struct hvm_vcpu {
         struct svm_vcpu svm;
     };
 
+    cpumask_var_t       cache_dirty_mask;
+
     struct tasklet      assert_evtchn_irq_tasklet;
 
     struct nestedvcpu   nvcpu;