
[xen stable-4.17] x86/HVM: drop stdvga's "lock" struct member



commit 1db4f1a3b75eb73cfa1bc55ee85e2206ca4664ec
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Nov 12 14:09:10 2024 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Nov 12 14:09:10 2024 +0100

    x86/HVM: drop stdvga's "lock" struct member
    
    No state is left to protect. Since "lock" was the last remaining
    field, drop the struct itself as well. Likewise drop the .complete
    handler, which would otherwise be left with nothing to do.
    
    This is part of XSA-463 / CVE-2024-45818
    
    Suggested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    (cherry picked from commit b180a50326c8a2c171f37c1940a0fbbdcad4be90)
---
 xen/arch/x86/hvm/stdvga.c             | 30 ++----------------------------
 xen/arch/x86/include/asm/hvm/domain.h |  1 -
 xen/arch/x86/include/asm/hvm/io.h     |  4 ----
 3 files changed, 2 insertions(+), 33 deletions(-)
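
For reference, the lock handoff removed here worked roughly as sketched
below, condensed from the pre-patch code visible in the diff that follows
(out_of_range() is a placeholder for the real VGA_MEM_BASE/VGA_MEM_SIZE
range check): stdvga_mem_accept() returned with s->lock still held on
success, and the paired .complete hook released it once the intercepted
access had been handled.

    /* Condensed pre-patch pattern; out_of_range() is a placeholder. */
    static bool cf_check stdvga_mem_accept(
        const struct hvm_io_handler *handler, const ioreq_t *p)
    {
        struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;

        /* Range check done before taking the lock, to avoid deadlock. */
        if ( out_of_range(p) )
            return false;

        spin_lock(&s->lock);

        if ( p->dir != IOREQ_WRITE || p->data_is_ptr || p->count != 1 )
        {
            /* Only single direct writes can be accelerated. */
            spin_unlock(&s->lock);
            return false;
        }

        return true; /* s->lock intentionally left held */
    }

    static void cf_check stdvga_mem_complete(const struct hvm_io_handler *handler)
    {
        struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;

        /* Release the lock taken (and kept held) by stdvga_mem_accept(). */
        spin_unlock(&s->lock);
    }

With no state left under the lock, accept() no longer needs to hold
anything across the access, which is what lets both the lock and the
then-empty .complete hook go.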

diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index 1d171cb427..fd38fb4800 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -70,61 +70,35 @@ static int cf_check stdvga_mem_write(
 static bool cf_check stdvga_mem_accept(
     const struct hvm_io_handler *handler, const ioreq_t *p)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
-
-    /*
-     * The range check must be done without taking the lock, to avoid
-     * deadlock when hvm_mmio_internal() is called from
-     * hvm_copy_to/from_guest_phys() in hvm_process_io_intercept().
-     */
     if ( (ioreq_mmio_first_byte(p) < VGA_MEM_BASE) ||
          (ioreq_mmio_last_byte(p) >= (VGA_MEM_BASE + VGA_MEM_SIZE)) )
         return 0;
 
-    spin_lock(&s->lock);
-
     if ( p->dir != IOREQ_WRITE || p->data_is_ptr || p->count != 1 )
     {
         /*
          * Only accept single direct writes, as that's the only thing we can
          * accelerate using buffered ioreq handling.
          */
-        goto reject;
+        return false;
     }
 
-    /* s->lock intentionally held */
-    return 1;
-
- reject:
-    spin_unlock(&s->lock);
-    return 0;
-}
-
-static void cf_check stdvga_mem_complete(const struct hvm_io_handler *handler)
-{
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
-
-    spin_unlock(&s->lock);
+    return true;
 }
 
 static const struct hvm_io_ops stdvga_mem_ops = {
     .accept = stdvga_mem_accept,
     .read = stdvga_mem_read,
     .write = stdvga_mem_write,
-    .complete = stdvga_mem_complete
 };
 
 void stdvga_init(struct domain *d)
 {
-    struct hvm_hw_stdvga *s = &d->arch.hvm.stdvga;
     struct hvm_io_handler *handler;
 
     if ( !has_vvga(d) )
         return;
 
-    memset(s, 0, sizeof(*s));
-    spin_lock_init(&s->lock);
-
     /* VGA memory */
     handler = hvm_next_io_handler(d);
     if ( handler )
diff --git a/xen/arch/x86/include/asm/hvm/domain.h b/xen/arch/x86/include/asm/hvm/domain.h
index e8e59a72bc..0b86329ea3 100644
--- a/xen/arch/x86/include/asm/hvm/domain.h
+++ b/xen/arch/x86/include/asm/hvm/domain.h
@@ -83,7 +83,6 @@ struct hvm_domain {
     struct hvm_hw_vpic     vpic[2]; /* 0=master; 1=slave */
     struct hvm_vioapic    **vioapic;
     unsigned int           nr_vioapics;
-    struct hvm_hw_stdvga   stdvga;
 
     /*
      * hvm_hw_pmtimer is a publicly-visible name. We will defer renaming
diff --git a/xen/arch/x86/include/asm/hvm/io.h b/xen/arch/x86/include/asm/hvm/io.h
index 2f5bfeccae..784bd0c83f 100644
--- a/xen/arch/x86/include/asm/hvm/io.h
+++ b/xen/arch/x86/include/asm/hvm/io.h
@@ -121,10 +121,6 @@ struct vpci_arch_msix_entry {
     int pirq;
 };
 
-struct hvm_hw_stdvga {
-    spinlock_t lock;
-};
-
 void stdvga_init(struct domain *d);
 
 extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
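
Dropping .complete outright, rather than keeping an empty stub, relies on
the hvm_io_ops hooks being optional. A minimal sketch of the dispatch
side, assuming (as most handlers leaving .complete unset suggests) that
the intercept code NULL-checks the hook before invoking it; this is a
hypothetical condensation, not the exact upstream dispatcher code:

    /* Completion step in the intercept dispatcher (sketch): with
     * stdvga's .complete gone, this is simply skipped for stdvga
     * accesses. */
    if ( handler->ops->complete )
        handler->ops->complete(handler);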
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.17