[xen stable-4.20] x86/shadow: don't overrun trace_emul_write_val



commit f63a5ba705b929ba1fb2e7b7d106c610e0353b87
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Jan 27 14:03:48 2026 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Jan 27 14:03:48 2026 +0100

    x86/shadow: don't overrun trace_emul_write_val
    
    Guests can do wider-than-PTE-size writes on page tables. The tracing
    helper variable, however, only offers space for a single PTE (and it is
    being switched to the more correct type right here). Therefore bound
    incoming write sizes to the amount of space available.
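
    For illustration (a minimal sketch, not part of the patch itself): the
    bound being added is the usual clamp-before-memcpy idiom. Here "buf" and
    "record_write" are hypothetical stand-ins for the per-CPU trace variable
    and the write hook:

        #include <stdint.h>
        #include <string.h>

        static uint64_t buf;              /* room for a single 8-byte PTE */

        static void record_write(const void *src, size_t bytes)
        {
            /* A guest write can span more than one PTE; without this clamp
             * the memcpy() below would overrun buf. */
            if ( bytes > sizeof(buf) )
                bytes = sizeof(buf);
            memcpy(&buf, src, bytes);
        }

    The change below applies the same clamp to trace_emulate_write_val.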
    
    To not leave dead code (which is a Misra concern), drop the now unused
    guest_pa_t as well.
    
    Also move and adjust GUEST_PTE_SIZE: Derive it rather than using hard-
    coded numbers, and put it in the sole source file where it's actually
    needed. This then also addresses a Misra rule 20.9 ("All identifiers
    used in the controlling expression of #if or #elif preprocessing
    directives shall be #define'd before evaluation") violation:
    GUEST_PAGING_LEVELS is #define'd only in multi.c.
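
    For context (again a sketch, not part of the patch): a bare
    "#if GUEST_PAGING_LEVELS >= 3" in a header evaluates the undefined
    identifier as 0 whenever the including file hasn't #define'd it, which is
    what rule 20.9 flags. Deriving the size from the PTE type avoids both
    that and the hard-coded 4/8, assuming guest_l1e_t is visible where the
    macro is used:

        /* Old: hard-coded width, only meaningful where GUEST_PAGING_LEVELS
         * is already #define'd:
         *
         *   #if GUEST_PAGING_LEVELS >= 3
         *   # define GUEST_PTE_SIZE 8
         *   #else
         *   # define GUEST_PTE_SIZE 4
         *   #endif
         *
         * New: derived from the guest PTE type itself. */
        #define GUEST_PTE_SIZE sizeof(guest_l1e_t)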
    
    This is XSA-477 / CVE-2025-58150.
    
    Fixes: 9a86ac1aa3d2 ("xentrace 5/7: Additional tracing for the shadow code")
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    master commit: a6626b613a843c6d4c3c453e45f84046f2d10302
    master date: 2026-01-27 13:55:01 +0100
---
 xen/arch/x86/mm/shadow/multi.c   | 18 ++++++++++++------
 xen/arch/x86/mm/shadow/private.h |  8 --------
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 10ddc408ff..ccaac3c5ae 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1965,15 +1965,15 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
 
 #if GUEST_PAGING_LEVELS == 4
 typedef u64 guest_va_t;
-typedef u64 guest_pa_t;
 #elif GUEST_PAGING_LEVELS == 3
 typedef u32 guest_va_t;
-typedef u64 guest_pa_t;
 #else
 typedef u32 guest_va_t;
-typedef u32 guest_pa_t;
 #endif
 
+/* Size (in bytes) of a guest PTE */
+#define GUEST_PTE_SIZE sizeof(guest_l1e_t)
+
 /* Shadow trace event with GUEST_PAGING_LEVELS folded into the event field. */
 static void sh_trace(uint32_t event, unsigned int extra, const void *extra_data)
 {
@@ -2043,11 +2043,14 @@ static void __maybe_unused sh_trace_gfn_va(uint32_t event, gfn_t gfn,
 static DEFINE_PER_CPU(guest_va_t,trace_emulate_initial_va);
 static DEFINE_PER_CPU(int,trace_extra_emulation_count);
 #endif
-static DEFINE_PER_CPU(guest_pa_t,trace_emulate_write_val);
+static DEFINE_PER_CPU(guest_l1e_t, trace_emulate_write_val);
 
 static void cf_check trace_emulate_write_val(
     const void *ptr, unsigned long vaddr, const void *src, unsigned int bytes)
 {
+    if ( bytes > sizeof(this_cpu(trace_emulate_write_val)) )
+        bytes = sizeof(this_cpu(trace_emulate_write_val));
+
 #if GUEST_PAGING_LEVELS == 3
     if ( vaddr == this_cpu(trace_emulate_initial_va) )
         memcpy(&this_cpu(trace_emulate_write_val), src, bytes);
@@ -2072,13 +2075,16 @@ static inline void sh_trace_emulate(guest_l1e_t gl1e, unsigned long va)
             /*
              * For GUEST_PAGING_LEVELS=3 (PAE paging), guest_l1e is 64 while
              * guest_va is 32.  Put it first to avoid padding.
+             *
+             * Note: .write_val is an arbitrary set of written bytes, possibly
+             * misaligned and possibly spanning the next gl1e.
              */
             guest_l1e_t gl1e, write_val;
             guest_va_t va;
             uint32_t flags:29, emulation_count:3;
         } d = {
             .gl1e            = gl1e,
-            .write_val.l1    = this_cpu(trace_emulate_write_val),
+            .write_val       = this_cpu(trace_emulate_write_val),
             .va              = va,
 #if GUEST_PAGING_LEVELS == 3
             .emulation_count = this_cpu(trace_extra_emulation_count),
@@ -2659,7 +2665,7 @@ static int cf_check sh_page_fault(
     paging_unlock(d);
     put_gfn(d, gfn_x(gfn));
 
-    this_cpu(trace_emulate_write_val) = 0;
+    this_cpu(trace_emulate_write_val) = (guest_l1e_t){};
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
  early_emulation:
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index cef9dbef2e..62c7884e63 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -120,14 +120,6 @@ enum {
     TRCE_SFLAG_OOS_FIXUP_EVICT,
 };
 
-
-/* Size (in bytes) of a guest PTE */
-#if GUEST_PAGING_LEVELS >= 3
-# define GUEST_PTE_SIZE 8
-#else
-# define GUEST_PTE_SIZE 4
-#endif
-
 /******************************************************************************
  * Auditing routines
  */
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.20