[Xen-devel] [PATCH 01/20] x86/shadow: Whitespace cleanup



Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c  |  490 ++++++++++++------------
 xen/arch/x86/mm/shadow/multi.c   |  770 +++++++++++++++++++-------------------
 xen/arch/x86/mm/shadow/multi.h   |   36 +-
 xen/arch/x86/mm/shadow/private.h |  106 +++---
 xen/arch/x86/mm/shadow/types.h   |   42 +--
 5 files changed, 722 insertions(+), 722 deletions(-)
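
Purely a whitespace cleanup; no functional change intended.  For reviewers,
a change of this sort can be reproduced and double-checked mechanically.
The commands below are only an illustrative sketch (assuming GNU sed and a
git checkout), not necessarily how this patch was generated:

  # Strip trailing spaces and tabs from the shadow code.
  sed -i 's/[[:blank:]]*$//' xen/arch/x86/mm/shadow/*.[ch]

  # An empty "git diff -w" confirms the change is whitespace-only;
  # an empty "git diff --check" confirms no whitespace errors remain.
  git diff -w
  git diff --check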

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 3630ae0..502e0d8 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -5,7 +5,7 @@
  * Parts of this code are Copyright (c) 2006 by XenSource Inc.
  * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
  * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -252,7 +252,7 @@ hvm_emulate_write(enum x86_segment seg,
         v, addr, p_data, bytes, sh_ctxt);
 }
 
-static int 
+static int
 hvm_emulate_cmpxchg(enum x86_segment seg,
                     unsigned long offset,
                     void *p_old,
@@ -329,7 +329,7 @@ pv_emulate_write(enum x86_segment seg,
         v, offset, p_data, bytes, sh_ctxt);
 }
 
-static int 
+static int
 pv_emulate_cmpxchg(enum x86_segment seg,
                    unsigned long offset,
                    void *p_old,
@@ -409,9 +409,9 @@ const struct x86_emulate_ops *shadow_init_emulation(
     return &hvm_shadow_emulator_ops;
 }
 
-/* Update an initialized emulation context to prepare for the next 
+/* Update an initialized emulation context to prepare for the next
  * instruction */
-void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt, 
+void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
                                struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -437,17 +437,17 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
         }
     }
 }
- 
+
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
 /**************************************************************************/
-/* Out-of-sync shadows. */ 
+/* Out-of-sync shadows. */
 
-/* From time to time, we let a shadowed pagetable page go out of sync 
- * with its shadow: the guest is allowed to write directly to the page, 
+/* From time to time, we let a shadowed pagetable page go out of sync
+ * with its shadow: the guest is allowed to write directly to the page,
  * and those writes are not synchronously reflected in the shadow.
- * This lets us avoid many emulations if the guest is writing a lot to a 
- * pagetable, but it relaxes a pretty important invariant in the shadow 
+ * This lets us avoid many emulations if the guest is writing a lot to a
+ * pagetable, but it relaxes a pretty important invariant in the shadow
  * pagetable design.  Therefore, some rules:
  *
  * 1. Only L1 pagetables may go out of sync: any page that is shadowed
@@ -455,21 +455,21 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
  *    using linear shadow pagetables much less dangerous.
  *    That means that: (a) unsyncing code needs to check for higher-level
  *    shadows, and (b) promotion code needs to resync.
- * 
+ *
  * 2. All shadow operations on a guest page require the page to be brought
  *    back into sync before proceeding.  This must be done under the
  *    paging lock so that the page is guaranteed to remain synced until
  *    the operation completes.
  *
- *    Exceptions to this rule: the pagefault and invlpg handlers may 
- *    update only one entry on an out-of-sync page without resyncing it. 
+ *    Exceptions to this rule: the pagefault and invlpg handlers may
+ *    update only one entry on an out-of-sync page without resyncing it.
  *
  * 3. Operations on shadows that do not start from a guest page need to
  *    be aware that they may be handling an out-of-sync shadow.
  *
- * 4. Operations that do not normally take the paging lock (fast-path 
- *    #PF handler, INVLPG) must fall back to a locking, syncing version 
- *    if they see an out-of-sync table. 
+ * 4. Operations that do not normally take the paging lock (fast-path
+ *    #PF handler, INVLPG) must fall back to a locking, syncing version
+ *    if they see an out-of-sync table.
  *
  * 5. Operations corresponding to guest TLB flushes (MOV CR3, INVLPG)
  *    must explicitly resync all relevant pages or update their
@@ -488,26 +488,26 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
 
 
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_FULL
-static void sh_oos_audit(struct domain *d) 
+static void sh_oos_audit(struct domain *d)
 {
     int idx, expected_idx, expected_idx_alt;
     struct page_info *pg;
     struct vcpu *v;
-    
-    for_each_vcpu(d, v) 
+
+    for_each_vcpu(d, v)
     {
         for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
         {
             mfn_t *oos = v->arch.paging.shadow.oos;
             if ( !mfn_valid(oos[idx]) )
                 continue;
-            
+
             expected_idx = mfn_x(oos[idx]) % SHADOW_OOS_PAGES;
             expected_idx_alt = ((expected_idx + 1) % SHADOW_OOS_PAGES);
             if ( idx != expected_idx && idx != expected_idx_alt )
             {
                 printk("%s: idx %d contains gmfn %lx, expected at %d or %d.\n",
-                       __func__, idx, mfn_x(oos[idx]), 
+                       __func__, idx, mfn_x(oos[idx]),
                        expected_idx, expected_idx_alt);
                 BUG();
             }
@@ -536,21 +536,21 @@ static void sh_oos_audit(struct domain *d)
 #endif
 
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
-void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn) 
+void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn)
 {
     int idx;
     struct vcpu *v;
     mfn_t *oos;
 
     ASSERT(mfn_is_out_of_sync(gmfn));
-    
-    for_each_vcpu(d, v) 
+
+    for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
         if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
             idx = (idx + 1) % SHADOW_OOS_PAGES;
-        
+
         if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
             return;
     }
@@ -593,7 +593,7 @@ static inline int oos_fixup_flush_gmfn(struct vcpu *v, mfn_t gmfn,
         if ( mfn_x(fixup->smfn[i]) != INVALID_MFN )
         {
             sh_remove_write_access_from_sl1p(v, gmfn,
-                                             fixup->smfn[i], 
+                                             fixup->smfn[i],
                                              fixup->off[i]);
             fixup->smfn[i] = _mfn(INVALID_MFN);
         }
@@ -612,8 +612,8 @@ void oos_fixup_add(struct vcpu *v, mfn_t gmfn,
     struct domain *d = v->domain;
 
     perfc_incr(shadow_oos_fixup_add);
-    
-    for_each_vcpu(d, v) 
+
+    for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         oos_fixup = v->arch.paging.shadow.oos_fixup;
@@ -638,7 +638,7 @@ void oos_fixup_add(struct vcpu *v, mfn_t gmfn,
                 TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_OOS_FIXUP_EVICT);
 
                 /* Reuse this slot and remove current writable mapping. */
-                sh_remove_write_access_from_sl1p(v, gmfn, 
+                sh_remove_write_access_from_sl1p(v, gmfn,
                                                  oos_fixup[idx].smfn[next],
                                                  oos_fixup[idx].off[next]);
                 perfc_incr(shadow_oos_fixup_evict);
@@ -681,7 +681,7 @@ static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn,
 
     case -1:
         /* An unfindable writeable typecount has appeared, probably via a
-         * grant table entry: can't shoot the mapping, so try to unshadow 
+         * grant table entry: can't shoot the mapping, so try to unshadow
          * the page.  If that doesn't work either, the guest is granting
          * his pagetables and must be killed after all.
          * This will flush the tlb, so we can return with no worries. */
@@ -715,7 +715,7 @@ static void _sh_resync(struct vcpu *v, mfn_t gmfn,
     ASSERT(paging_locked_by_me(v->domain));
     ASSERT(mfn_is_out_of_sync(gmfn));
     /* Guest page must be shadowed *only* as L1 when out of sync. */
-    ASSERT(!(mfn_to_page(gmfn)->shadow_flags & SHF_page_type_mask 
+    ASSERT(!(mfn_to_page(gmfn)->shadow_flags & SHF_page_type_mask
              & ~SHF_L1_ANY));
     ASSERT(!sh_page_has_multiple_shadows(mfn_to_page(gmfn)));
 
@@ -751,14 +751,14 @@ static void oos_hash_add(struct vcpu *v, mfn_t gmfn)
     mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
     struct oos_fixup *oos_fixup = v->arch.paging.shadow.oos_fixup;
     struct oos_fixup fixup = { .next = 0 };
-    
+
     for (i = 0; i < SHADOW_OOS_FIXUPS; i++ )
         fixup.smfn[i] = _mfn(INVALID_MFN);
 
     idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
     oidx = idx;
 
-    if ( mfn_valid(oos[idx]) 
+    if ( mfn_valid(oos[idx])
          && (mfn_x(oos[idx]) % SHADOW_OOS_PAGES) == idx )
     {
         /* Punt the current occupant into the next slot */
@@ -795,7 +795,7 @@ static void oos_hash_remove(struct vcpu *v, mfn_t gmfn)
 
     SHADOW_PRINTK("%pv gmfn %lx\n", v, mfn_x(gmfn));
 
-    for_each_vcpu(d, v) 
+    for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
@@ -818,8 +818,8 @@ mfn_t oos_snapshot_lookup(struct vcpu *v, mfn_t gmfn)
     mfn_t *oos;
     mfn_t *oos_snapshot;
     struct domain *d = v->domain;
-    
-    for_each_vcpu(d, v) 
+
+    for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         oos_snapshot = v->arch.paging.shadow.oos_snapshot;
@@ -846,7 +846,7 @@ void sh_resync(struct vcpu *v, mfn_t gmfn)
     struct oos_fixup *oos_fixup;
     struct domain *d = v->domain;
 
-    for_each_vcpu(d, v) 
+    for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         oos_fixup = v->arch.paging.shadow.oos_fixup;
@@ -854,7 +854,7 @@ void sh_resync(struct vcpu *v, mfn_t gmfn)
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
         if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
             idx = (idx + 1) % SHADOW_OOS_PAGES;
-        
+
         if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
         {
             _sh_resync(v, gmfn, &oos_fixup[idx], oos_snapshot[idx]);
@@ -905,7 +905,7 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
         goto resync_others;
 
     /* First: resync all of this vcpu's oos pages */
-    for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ ) 
+    for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
         if ( mfn_valid(oos[idx]) )
         {
             /* Write-protect and sync contents */
@@ -920,14 +920,14 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
     /* Second: make all *other* vcpus' oos pages safe. */
     for_each_vcpu(v->domain, other)
     {
-        if ( v == other ) 
+        if ( v == other )
             continue;
 
         oos = other->arch.paging.shadow.oos;
         oos_fixup = other->arch.paging.shadow.oos_fixup;
         oos_snapshot = other->arch.paging.shadow.oos_snapshot;
 
-        for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ ) 
+        for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
         {
             if ( !mfn_valid(oos[idx]) )
                 continue;
@@ -946,7 +946,7 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
                 _sh_resync(other, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
                 oos[idx] = _mfn(INVALID_MFN);
             }
-        }        
+        }
     }
 }
 
@@ -955,19 +955,19 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
 int sh_unsync(struct vcpu *v, mfn_t gmfn)
 {
     struct page_info *pg;
-    
+
     ASSERT(paging_locked_by_me(v->domain));
 
     SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
                   v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
 
     pg = mfn_to_page(gmfn);
- 
+
     /* Guest page must be shadowed *only* as L1 and *only* once when out
-     * of sync.  Also, get out now if it's already out of sync. 
+     * of sync.  Also, get out now if it's already out of sync.
      * Also, can't safely unsync if some vcpus have paging disabled.*/
-    if ( pg->shadow_flags & 
-         ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync) 
+    if ( pg->shadow_flags &
+         ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync)
          || sh_page_has_multiple_shadows(pg)
          || is_pv_domain(v->domain)
          || !v->domain->arch.paging.shadow.oos_active )
@@ -995,9 +995,9 @@ void shadow_promote(struct vcpu *v, mfn_t gmfn, unsigned int type)
 
     ASSERT(mfn_valid(gmfn));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Is the page already shadowed and out of sync? */
-    if ( page_is_out_of_sync(page) ) 
+    if ( page_is_out_of_sync(page) )
         sh_resync(v, gmfn);
 #endif
 
@@ -1026,13 +1026,13 @@ void shadow_demote(struct vcpu *v, mfn_t gmfn, u32 type)
 
     if ( (page->shadow_flags & SHF_page_type_mask) == 0 )
     {
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         /* Was the page out of sync? */
-        if ( page_is_out_of_sync(page) ) 
+        if ( page_is_out_of_sync(page) )
         {
             oos_hash_remove(v, gmfn);
         }
-#endif 
+#endif
         clear_bit(_PGC_page_table, &page->count_info);
     }
 
@@ -1050,11 +1050,11 @@ sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size)
     struct page_info *page = mfn_to_page(gmfn);
 
     paging_mark_dirty(v->domain, mfn_x(gmfn));
-    
+
     // Determine which types of shadows are affected, and update each.
     //
     // Always validate L1s before L2s to prevent another cpu with a linear
-    // mapping of this gmfn from seeing a walk that results from 
+    // mapping of this gmfn from seeing a walk that results from
     // using the new L2 value and the old L1 value.  (It is OK for such a
     // guest to see a walk that uses the old L2 value with the new L1 value,
     // as hardware could behave this way if one level of the pagewalk occurs
@@ -1067,40 +1067,40 @@ sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size)
     if ( !(page->count_info & PGC_page_table) )
         return 0;  /* Not shadowed at all */
 
-    if ( page->shadow_flags & SHF_L1_32 ) 
+    if ( page->shadow_flags & SHF_L1_32 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 2)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L2_32 ) 
+    if ( page->shadow_flags & SHF_L2_32 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 2)
             (v, gmfn, entry, size);
 
-    if ( page->shadow_flags & SHF_L1_PAE ) 
+    if ( page->shadow_flags & SHF_L1_PAE )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L2_PAE ) 
+    if ( page->shadow_flags & SHF_L2_PAE )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L2H_PAE ) 
+    if ( page->shadow_flags & SHF_L2H_PAE )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3)
             (v, gmfn, entry, size);
 
-    if ( page->shadow_flags & SHF_L1_64 ) 
+    if ( page->shadow_flags & SHF_L1_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 4)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L2_64 ) 
+    if ( page->shadow_flags & SHF_L2_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L2H_64 ) 
+    if ( page->shadow_flags & SHF_L2H_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L3_64 ) 
+    if ( page->shadow_flags & SHF_L3_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4)
             (v, gmfn, entry, size);
-    if ( page->shadow_flags & SHF_L4_64 ) 
+    if ( page->shadow_flags & SHF_L4_64 )
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4)
             (v, gmfn, entry, size);
 
-    this_cpu(trace_shadow_path_flags) |= (result<<(TRCE_SFLAG_SET_CHANGED)); 
+    this_cpu(trace_shadow_path_flags) |= (result<<(TRCE_SFLAG_SET_CHANGED));
 
     return result;
 }
@@ -1121,12 +1121,12 @@ sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
     if ( rc & SHADOW_SET_FLUSH )
         /* Need to flush TLBs to pick up shadow PT changes */
         flush_tlb_mask(d->domain_dirty_cpumask);
-    if ( rc & SHADOW_SET_ERROR ) 
+    if ( rc & SHADOW_SET_ERROR )
     {
-        /* This page is probably not a pagetable any more: tear it out of the 
-         * shadows, along with any tables that reference it.  
-         * Since the validate call above will have made a "safe" (i.e. zero) 
-         * shadow entry, we can let the domain live even if we can't fully 
+        /* This page is probably not a pagetable any more: tear it out of the
+         * shadows, along with any tables that reference it.
+         * Since the validate call above will have made a "safe" (i.e. zero)
+         * shadow entry, we can let the domain live even if we can't fully
          * unshadow the page. */
         sh_remove_shadows(v, gmfn, 0, 0);
     }
@@ -1134,7 +1134,7 @@ sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
 
 int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
                              intpte_t new, mfn_t gmfn)
-/* Write a new value into the guest pagetable, and update the shadows 
+/* Write a new value into the guest pagetable, and update the shadows
  * appropriately.  Returns 0 if we page-faulted, 1 for success. */
 {
     int failed;
@@ -1148,7 +1148,7 @@ int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
 
 int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
                                intpte_t *old, intpte_t new, mfn_t gmfn)
-/* Cmpxchg a new value into the guest pagetable, and update the shadows 
+/* Cmpxchg a new value into the guest pagetable, and update the shadows
  * appropriately. Returns 0 if we page-faulted, 1 if not.
  * N.B. caller should check the value of "old" to see if the
  * cmpxchg itself was successful. */
@@ -1166,7 +1166,7 @@ int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
 
 
 /**************************************************************************/
-/* Memory management for shadow pages. */ 
+/* Memory management for shadow pages. */
 
 /* Allocating shadow pages
  * -----------------------
@@ -1180,12 +1180,12 @@ int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
  * PAE/64-bit l2 tables (1GB va each).  These multi-page shadows are
  * not contiguous in memory; functions for handling offsets into them are
  * defined in shadow/multi.c (shadow_l1_index() etc.)
- *    
+ *
  * This table shows the allocation behaviour of the different modes:
  *
  * Xen paging      64b  64b  64b
  * Guest paging    32b  pae  64b
- * PV or HVM       HVM  HVM   * 
+ * PV or HVM       HVM  HVM   *
  * Shadow paging   pae  pae  64b
  *
  * sl1 size         8k   4k   4k
@@ -1193,8 +1193,8 @@ int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
  * sl3 size         -    -    4k
  * sl4 size         -    -    4k
  *
- * In HVM guests, the p2m table is built out of shadow pages, and we provide 
- * a function for the p2m management to steal pages, in max-order chunks, from 
+ * In HVM guests, the p2m table is built out of shadow pages, and we provide
+ * a function for the p2m management to steal pages, in max-order chunks, from
  * the free pool.
  */
 
@@ -1221,15 +1221,15 @@ const u8 sh_type_to_size[] = {
 /* Figure out the least acceptable quantity of shadow memory.
  * The minimum memory requirement for always being able to free up a
  * chunk of memory is very small -- only three max-order chunks per
- * vcpu to hold the top level shadows and pages with Xen mappings in them.  
+ * vcpu to hold the top level shadows and pages with Xen mappings in them.
  *
  * But for a guest to be guaranteed to successfully execute a single
  * instruction, we must be able to map a large number (about thirty) VAs
  * at the same time, which means that to guarantee progress, we must
  * allow for more than ninety allocated pages per vcpu.  We round that
- * up to 128 pages, or half a megabyte per vcpu, and add 1 more vcpu's 
+ * up to 128 pages, or half a megabyte per vcpu, and add 1 more vcpu's
  * worth to make sure we never return zero. */
-static unsigned int shadow_min_acceptable_pages(struct domain *d) 
+static unsigned int shadow_min_acceptable_pages(struct domain *d)
 {
     u32 vcpu_count = 1;
     struct vcpu *v;
@@ -1238,7 +1238,7 @@ static unsigned int shadow_min_acceptable_pages(struct domain *d)
         vcpu_count++;
 
     return (vcpu_count * 128);
-} 
+}
 
 /* Dispatcher function: call the per-mode function that will unhook the
  * non-Xen mappings in this top-level shadow mfn.  With user_only == 1,
@@ -1290,7 +1290,7 @@ static void _shadow_prealloc(
     int i;
 
     if ( d->arch.paging.shadow.free_pages >= pages ) return;
-    
+
     v = current;
     if ( v->domain != d )
         v = d->vcpu[0];
@@ -1315,13 +1315,13 @@ static void _shadow_prealloc(
      * mappings. */
     perfc_incr(shadow_prealloc_2);
 
-    for_each_vcpu(d, v2) 
+    for_each_vcpu(d, v2)
         for ( i = 0 ; i < 4 ; i++ )
         {
             if ( !pagetable_is_null(v2->arch.shadow_table[i]) )
             {
                 TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PREALLOC_UNHOOK);
-                shadow_unhook_mappings(v, 
+                shadow_unhook_mappings(v,
                                pagetable_get_mfn(v2->arch.shadow_table[i]), 0);
 
                 /* See if that freed up enough space */
@@ -1332,7 +1332,7 @@ static void _shadow_prealloc(
                 }
             }
         }
-    
+
     /* Nothing more we can do: all remaining shadows are of pages that
      * hold Xen mappings for some vcpu.  This can never happen. */
     SHADOW_ERROR("Can't pre-allocate %u shadow pages!\n"
@@ -1356,7 +1356,7 @@ void shadow_prealloc(struct domain *d, u32 type, unsigned int count)
 
 /* Deliberately free all the memory we can: this will tear down all of
  * this domain's shadows */
-static void shadow_blow_tables(struct domain *d) 
+static void shadow_blow_tables(struct domain *d)
 {
     struct page_info *sp, *t;
     struct vcpu *v = d->vcpu[0];
@@ -1371,12 +1371,12 @@ static void shadow_blow_tables(struct domain *d)
         smfn = page_to_mfn(sp);
         sh_unpin(v, smfn);
     }
-        
+
     /* Second pass: unhook entries of in-use shadows */
-    for_each_vcpu(d, v) 
+    for_each_vcpu(d, v)
         for ( i = 0 ; i < 4 ; i++ )
             if ( !pagetable_is_null(v->arch.shadow_table[i]) )
-                shadow_unhook_mappings(v, 
+                shadow_unhook_mappings(v,
                                pagetable_get_mfn(v->arch.shadow_table[i]), 0);
 
     /* Make sure everyone sees the unshadowings */
@@ -1441,9 +1441,9 @@ set_next_shadow(struct page_info *sp, struct page_info *next)
 }
 
 /* Allocate another shadow's worth of (contiguous, aligned) pages,
- * and fill in the type and backpointer fields of their page_infos. 
+ * and fill in the type and backpointer fields of their page_infos.
  * Never fails to allocate. */
-mfn_t shadow_alloc(struct domain *d,  
+mfn_t shadow_alloc(struct domain *d,
                     u32 shadow_type,
                     unsigned long backpointer)
 {
@@ -1485,10 +1485,10 @@ mfn_t shadow_alloc(struct domain *d,
     INIT_PAGE_LIST_HEAD(&tmp_list);
 
     /* Init page info fields and clear the pages */
-    for ( i = 0; i < pages ; i++ ) 
+    for ( i = 0; i < pages ; i++ )
     {
         sp = page_list_remove_head(&d->arch.paging.shadow.freelist);
-        /* Before we overwrite the old contents of this page, 
+        /* Before we overwrite the old contents of this page,
          * we need to be sure that no TLB holds a pointer to it. */
         cpumask_copy(&mask, d->domain_dirty_cpumask);
         tlbflush_filter(mask, sp->tlbflush_timestamp);
@@ -1512,7 +1512,7 @@ mfn_t shadow_alloc(struct domain *d,
         set_next_shadow(sp, NULL);
         perfc_incr(shadow_alloc_count);
     }
-    if ( shadow_type >= SH_type_min_shadow 
+    if ( shadow_type >= SH_type_min_shadow
          && shadow_type <= SH_type_max_shadow )
         sp->u.sh.head = 1;
 
@@ -1525,7 +1525,7 @@ mfn_t shadow_alloc(struct domain *d,
 /* Return some shadow pages to the pool. */
 void shadow_free(struct domain *d, mfn_t smfn)
 {
-    struct page_info *next = NULL, *sp = mfn_to_page(smfn); 
+    struct page_info *next = NULL, *sp = mfn_to_page(smfn);
     struct page_list_head *pin_list;
     unsigned int pages;
     u32 shadow_type;
@@ -1540,16 +1540,16 @@ void shadow_free(struct domain *d, mfn_t smfn)
     pages = shadow_size(shadow_type);
     pin_list = &d->arch.paging.shadow.pinned_shadows;
 
-    for ( i = 0; i < pages; i++ ) 
+    for ( i = 0; i < pages; i++ )
     {
 #if SHADOW_OPTIMIZATIONS & (SHOPT_WRITABLE_HEURISTIC | SHOPT_FAST_EMULATION)
         struct vcpu *v;
-        for_each_vcpu(d, v) 
+        for_each_vcpu(d, v)
         {
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
             /* No longer safe to look for a writeable mapping in this shadow */
-            if ( v->arch.paging.shadow.last_writeable_pte_smfn 
-                 == mfn_x(page_to_mfn(sp)) ) 
+            if ( v->arch.paging.shadow.last_writeable_pte_smfn
+                 == mfn_x(page_to_mfn(sp)) )
                 v->arch.paging.shadow.last_writeable_pte_smfn = 0;
 #endif
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
@@ -1562,7 +1562,7 @@ void shadow_free(struct domain *d, mfn_t smfn)
             next = page_list_next(sp, pin_list);
         /* Strip out the type: this is now a free shadow page */
         sp->u.sh.type = sp->u.sh.head = 0;
-        /* Remember the TLB timestamp so we will know whether to flush 
+        /* Remember the TLB timestamp so we will know whether to flush
          * TLBs when we reuse the page.  Because the destructors leave the
          * contents of the pages in place, we can delay TLB flushes until
          * just before the allocator hands the page out again. */
@@ -1584,11 +1584,11 @@ shadow_alloc_p2m_page(struct domain *d)
 {
     struct page_info *pg;
 
-    /* This is called both from the p2m code (which never holds the 
+    /* This is called both from the p2m code (which never holds the
      * paging lock) and the log-dirty code (which always does). */
     paging_lock_recursive(d);
 
-    if ( d->arch.paging.shadow.total_pages 
+    if ( d->arch.paging.shadow.total_pages
          < shadow_min_acceptable_pages(d) + 1 )
     {
         if ( !d->arch.paging.p2m_alloc_failed )
@@ -1630,9 +1630,9 @@ shadow_free_p2m_page(struct domain *d, struct page_info *pg)
     }
     pg->count_info &= ~PGC_count_mask;
     pg->u.sh.type = SH_type_p2m_table; /* p2m code reuses type-info */
-    page_set_owner(pg, NULL); 
+    page_set_owner(pg, NULL);
 
-    /* This is called both from the p2m code (which never holds the 
+    /* This is called both from the p2m code (which never holds the
      * paging lock) and the log-dirty code (which always does). */
     paging_lock_recursive(d);
 
@@ -1647,7 +1647,7 @@ shadow_free_p2m_page(struct domain *d, struct page_info *pg)
  * Input will be rounded up to at least shadow_min_acceptable_pages(),
  * plus space for the p2m table.
  * Returns 0 for success, non-zero for failure. */
-static unsigned int sh_set_allocation(struct domain *d, 
+static unsigned int sh_set_allocation(struct domain *d,
                                       unsigned int pages,
                                       int *preempted)
 {
@@ -1663,7 +1663,7 @@ static unsigned int sh_set_allocation(struct domain *d,
             pages = 0;
         else
             pages -= d->arch.paging.shadow.p2m_pages;
-        
+
         /* Don't allocate less than the minimum acceptable, plus one page per
          * megabyte of RAM (for the p2m table) */
         lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
@@ -1671,18 +1671,18 @@ static unsigned int sh_set_allocation(struct domain *d,
             pages = lower_bound;
     }
 
-    SHADOW_PRINTK("current %i target %i\n", 
+    SHADOW_PRINTK("current %i target %i\n",
                    d->arch.paging.shadow.total_pages, pages);
 
     for ( ; ; )
     {
-        if ( d->arch.paging.shadow.total_pages < pages ) 
+        if ( d->arch.paging.shadow.total_pages < pages )
         {
             /* Need to allocate more memory from domheap */
             sp = (struct page_info *)
                 alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
-            if ( sp == NULL ) 
-            { 
+            if ( sp == NULL )
+            {
                 SHADOW_PRINTK("failed to allocate shadow pages.\n");
                 return -ENOMEM;
             }
@@ -1693,8 +1693,8 @@ static unsigned int sh_set_allocation(struct domain *d,
             sp->u.sh.count = 0;
             sp->tlbflush_timestamp = 0; /* Not in any TLB */
             page_list_add_tail(sp, &d->arch.paging.shadow.freelist);
-        } 
-        else if ( d->arch.paging.shadow.total_pages > pages ) 
+        }
+        else if ( d->arch.paging.shadow.total_pages > pages )
         {
             /* Need to return memory to domheap */
             _shadow_prealloc(d, 1);
@@ -1734,7 +1734,7 @@ static unsigned int shadow_get_allocation(struct domain *d)
 
 /**************************************************************************/
 /* Hash table for storing the guest->shadow mappings.
- * The table itself is an array of pointers to shadows; the shadows are then 
+ * The table itself is an array of pointers to shadows; the shadows are then
  * threaded on a singly-linked list of shadows with the same hash value */
 
 #define SHADOW_HASH_BUCKETS 251
@@ -1742,7 +1742,7 @@ static unsigned int shadow_get_allocation(struct domain *d)
 
 /* Hash function that takes a gfn or mfn, plus another byte of type info */
 typedef u32 key_t;
-static inline key_t sh_hash(unsigned long n, unsigned int t) 
+static inline key_t sh_hash(unsigned long n, unsigned int t)
 {
     unsigned char *p = (unsigned char *)&n;
     key_t k = t;
@@ -1801,7 +1801,7 @@ static void sh_hash_audit_bucket(struct domain *d, int bucket)
                         SHADOW_ERROR("MFN %#"PRI_mfn" shadowed (by %#"PRI_mfn")"
                                      " and not OOS but has typecount %#lx\n",
                                      __backpointer(sp),
-                                     mfn_x(page_to_mfn(sp)), 
+                                     mfn_x(page_to_mfn(sp)),
                                      gpg->u.inuse.type_info);
                         BUG();
                     }
@@ -1809,7 +1809,7 @@ static void sh_hash_audit_bucket(struct domain *d, int bucket)
             }
             else /* Not an l1 */
 #endif
-            if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page 
+            if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page
                  && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
             {
                 SHADOW_ERROR("MFN %#"PRI_mfn" shadowed (by %#"PRI_mfn")"
@@ -1839,7 +1839,7 @@ static void sh_hash_audit(struct domain *d)
     if ( !(SHADOW_AUDIT_ENABLE) )
         return;
 
-    for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ ) 
+    for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ )
     {
         sh_hash_audit_bucket(d, i);
     }
@@ -1849,7 +1849,7 @@ static void sh_hash_audit(struct domain *d)
 #define sh_hash_audit(_d) do {} while(0)
 #endif /* Hashtable bucket audit */
 
-/* Allocate and initialise the table itself.  
+/* Allocate and initialise the table itself.
  * Returns 0 for success, 1 for error. */
 static int shadow_hash_alloc(struct domain *d)
 {
@@ -1906,11 +1906,11 @@ mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, unsigned int t)
                 if ( unlikely(d->arch.paging.shadow.hash_walking != 0) )
                     /* Can't reorder: someone is walking the hash chains */
                     return page_to_mfn(sp);
-                else 
+                else
                 {
                     ASSERT(prev);
                     /* Delete sp from the list */
-                    prev->next_shadow = sp->next_shadow;                    
+                    prev->next_shadow = sp->next_shadow;
                     /* Re-insert it at the head of the list */
                     set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
                     d->arch.paging.shadow.hash_table[key] = sp;
@@ -1930,14 +1930,14 @@ mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, unsigned int t)
     return _mfn(INVALID_MFN);
 }
 
-void shadow_hash_insert(struct vcpu *v, unsigned long n, unsigned int t, 
+void shadow_hash_insert(struct vcpu *v, unsigned long n, unsigned int t,
                         mfn_t smfn)
 /* Put a mapping (n,t)->smfn into the hash table */
 {
     struct domain *d = v->domain;
     struct page_info *sp;
     key_t key;
-    
+
     ASSERT(paging_locked_by_me(d));
     ASSERT(d->arch.paging.shadow.hash_table);
     ASSERT(t);
@@ -1947,16 +1947,16 @@ void shadow_hash_insert(struct vcpu *v, unsigned long n, unsigned int t,
     perfc_incr(shadow_hash_inserts);
     key = sh_hash(n, t);
     sh_hash_audit_bucket(d, key);
-    
+
     /* Insert this shadow at the top of the bucket */
     sp = mfn_to_page(smfn);
     set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
     d->arch.paging.shadow.hash_table[key] = sp;
-    
+
     sh_hash_audit_bucket(d, key);
 }
 
-void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t, 
+void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t,
                         mfn_t smfn)
 /* Excise the mapping (n,t)->smfn from the hash table */
 {
@@ -1973,12 +1973,12 @@ void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t,
     perfc_incr(shadow_hash_deletes);
     key = sh_hash(n, t);
     sh_hash_audit_bucket(d, key);
-    
+
     sp = mfn_to_page(smfn);
-    if ( d->arch.paging.shadow.hash_table[key] == sp ) 
+    if ( d->arch.paging.shadow.hash_table[key] == sp )
         /* Easy case: we're deleting the head item. */
         d->arch.paging.shadow.hash_table[key] = next_shadow(sp);
-    else 
+    else
     {
         /* Need to search for the one we want */
         x = d->arch.paging.shadow.hash_table[key];
@@ -2001,17 +2001,17 @@ void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t,
 
 typedef int (*hash_callback_t)(struct vcpu *v, mfn_t smfn, mfn_t other_mfn);
 
-static void hash_foreach(struct vcpu *v, 
-                         unsigned int callback_mask, 
+static void hash_foreach(struct vcpu *v,
+                         unsigned int callback_mask,
                          const hash_callback_t callbacks[],
                          mfn_t callback_mfn)
-/* Walk the hash table looking at the types of the entries and 
- * calling the appropriate callback function for each entry. 
+/* Walk the hash table looking at the types of the entries and
+ * calling the appropriate callback function for each entry.
  * The mask determines which shadow types we call back for, and the array
  * of callbacks tells us which function to call.
- * Any callback may return non-zero to let us skip the rest of the scan. 
+ * Any callback may return non-zero to let us skip the rest of the scan.
  *
- * WARNING: Callbacks MUST NOT add or remove hash entries unless they 
+ * WARNING: Callbacks MUST NOT add or remove hash entries unless they
  * then return non-zero to terminate the scan. */
 {
     int i, done = 0;
@@ -2028,7 +2028,7 @@ static void hash_foreach(struct vcpu *v,
     ASSERT(d->arch.paging.shadow.hash_walking == 0);
     d->arch.paging.shadow.hash_walking = 1;
 
-    for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ ) 
+    for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ )
     {
         /* WARNING: This is not safe against changes to the hash table.
          * The callback *must* return non-zero if it has inserted or
@@ -2044,15 +2044,15 @@ static void hash_foreach(struct vcpu *v,
                 if ( done ) break;
             }
         }
-        if ( done ) break; 
+        if ( done ) break;
     }
-    d->arch.paging.shadow.hash_walking = 0; 
+    d->arch.paging.shadow.hash_walking = 0;
 }
 
 
 /**************************************************************************/
 /* Destroy a shadow page: simple dispatcher to call the per-type destructor
- * which will decrement refcounts appropriately and return memory to the 
+ * which will decrement refcounts appropriately and return memory to the
  * free pool. */
 
 void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
@@ -2065,13 +2065,13 @@ void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
 
     /* Double-check, if we can, that the shadowed page belongs to this
      * domain, (by following the back-pointer). */
-    ASSERT(t == SH_type_fl1_32_shadow  ||  
-           t == SH_type_fl1_pae_shadow ||  
-           t == SH_type_fl1_64_shadow  || 
-           t == SH_type_monitor_table  || 
+    ASSERT(t == SH_type_fl1_32_shadow  ||
+           t == SH_type_fl1_pae_shadow ||
+           t == SH_type_fl1_64_shadow  ||
+           t == SH_type_monitor_table  ||
            (is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
            (page_get_owner(mfn_to_page(backpointer(sp)))
-            == v->domain)); 
+            == v->domain));
 
     /* The down-shifts here are so that the switch statement is on nice
      * small numbers that the compiler will enjoy */
@@ -2115,7 +2115,7 @@ void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
         SHADOW_ERROR("tried to destroy shadow of bad type %08lx\n",
                      (unsigned long)t);
         BUG();
-    }    
+    }
 }
 
 static inline void trace_shadow_wrmap_bf(mfn_t gmfn)
@@ -2129,13 +2129,13 @@ static inline void trace_shadow_wrmap_bf(mfn_t gmfn)
 }
 
 /**************************************************************************/
-/* Remove all writeable mappings of a guest frame from the shadow tables 
- * Returns non-zero if we need to flush TLBs. 
+/* Remove all writeable mappings of a guest frame from the shadow tables
+ * Returns non-zero if we need to flush TLBs.
  * level and fault_addr desribe how we found this to be a pagetable;
  * level==0 means we have some other reason for revoking write access.
  * If level==0 we are allowed to fail, returning -1. */
 
-int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, 
+int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
                            unsigned int level,
                            unsigned long fault_addr)
 {
@@ -2180,7 +2180,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
 
     /* Early exit if it's already a pagetable, or otherwise not writeable */
     if ( (sh_mfn_is_a_page_table(gmfn)
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
          /* Unless they've been allowed to go out of sync with their shadows */
            && !mfn_oos_may_write(gmfn)
 #endif
@@ -2192,11 +2192,11 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
 
     perfc_incr(shadow_writeable);
 
-    /* If this isn't a "normal" writeable page, the domain is trying to 
+    /* If this isn't a "normal" writeable page, the domain is trying to
      * put pagetables in special memory of some kind.  We can't allow that. */
     if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_writable_page )
     {
-        SHADOW_ERROR("can't remove write access to mfn %lx, type_info is %" 
+        SHADOW_ERROR("can't remove write access to mfn %lx, type_info is %"
                       PRtype_info "\n",
                       mfn_x(gmfn), mfn_to_page(gmfn)->u.inuse.type_info);
         domain_crash(v->domain);
@@ -2219,7 +2219,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
                 return 1;                                               \
             }                                                           \
         } while (0)
-        
+
         if ( v->arch.paging.mode->guest_levels == 2 )
         {
             if ( level == 1 )
@@ -2227,27 +2227,27 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
                 GUESS(0xC0000000UL + (fault_addr >> 10), 1);
 
             /* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */
-            if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 ) 
+            if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 )
                 GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
 
             /* FreeBSD: Linear map at 0xBFC00000 */
             if ( level == 1 )
-                GUESS(0xBFC00000UL 
+                GUESS(0xBFC00000UL
                       + ((fault_addr & VADDR_MASK) >> 10), 6);
         }
         else if ( v->arch.paging.mode->guest_levels == 3 )
         {
             /* 32bit PAE w2k3: linear map at 0xC0000000 */
-            switch ( level ) 
+            switch ( level )
             {
             case 1: GUESS(0xC0000000UL + (fault_addr >> 9), 2); break;
             case 2: GUESS(0xC0600000UL + (fault_addr >> 18), 2); break;
             }
 
             /* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */
-            if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 ) 
+            if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 )
                 GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
-            
+
             /* FreeBSD PAE: Linear map at 0xBF800000 */
             switch ( level )
             {
@@ -2260,20 +2260,20 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
         else if ( v->arch.paging.mode->guest_levels == 4 )
         {
             /* 64bit w2k3: linear map at 0xfffff68000000000 */
-            switch ( level ) 
+            switch ( level )
             {
-            case 1: GUESS(0xfffff68000000000UL 
+            case 1: GUESS(0xfffff68000000000UL
                           + ((fault_addr & VADDR_MASK) >> 9), 3); break;
             case 2: GUESS(0xfffff6fb40000000UL
                           + ((fault_addr & VADDR_MASK) >> 18), 3); break;
-            case 3: GUESS(0xfffff6fb7da00000UL 
+            case 3: GUESS(0xfffff6fb7da00000UL
                           + ((fault_addr & VADDR_MASK) >> 27), 3); break;
             }
 
             /* 64bit Linux direct map at 0xffff880000000000; older kernels
              * had it at 0xffff810000000000, and older kernels yet had it
              * at 0x0000010000000000UL */
-            gfn = mfn_to_gfn(v->domain, gmfn); 
+            gfn = mfn_to_gfn(v->domain, gmfn);
             GUESS(0xffff880000000000UL + (gfn << PAGE_SHIFT), 4);
             GUESS(0xffff810000000000UL + (gfn << PAGE_SHIFT), 4);
             GUESS(0x0000010000000000UL + (gfn << PAGE_SHIFT), 4);
@@ -2283,7 +2283,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
              * kpm_vbase; 0xfffffe0000000000UL
              */
             GUESS(0xfffffe0000000000UL + (gfn << PAGE_SHIFT), 4);
- 
+
              /* FreeBSD 64bit: linear map 0xffff800000000000 */
              switch ( level )
              {
@@ -2316,7 +2316,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
         mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
         int shtype = mfn_to_page(last_smfn)->u.sh.type;
 
-        if ( callbacks[shtype] ) 
+        if ( callbacks[shtype] )
             callbacks[shtype](v, last_smfn, gmfn);
 
         if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count )
@@ -2327,7 +2327,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
         return 1;
 
 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */
-    
+
     /* Brute-force search of all the shadows, by walking the hash */
     trace_shadow_wrmap_bf(gmfn);
     if ( level == 0 )
@@ -2348,20 +2348,20 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
                       (mfn_to_page(gmfn)->u.inuse.type_info&PGT_count_mask));
         domain_crash(v->domain);
     }
-    
+
     /* We killed at least one writeable mapping, so must flush TLBs. */
     return 1;
 }
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
 int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
                                      mfn_t smfn, unsigned long off)
 {
     struct page_info *sp = mfn_to_page(smfn);
-    
+
     ASSERT(mfn_valid(smfn));
     ASSERT(mfn_valid(gmfn));
-    
+
     if ( sp->u.sh.type == SH_type_l1_32_shadow
          || sp->u.sh.type == SH_type_fl1_32_shadow )
     {
@@ -2379,7 +2379,7 @@ int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
 
     return 0;
 }
-#endif 
+#endif
 
 /**************************************************************************/
 /* Remove all mappings of a guest frame from the shadow tables.
@@ -2427,9 +2427,9 @@ static int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
      * can be called via put_page_type when we clear a shadow l1e).*/
     paging_lock_recursive(v->domain);
 
-    /* XXX TODO: 
+    /* XXX TODO:
      * Heuristics for finding the (probably) single mapping of this gmfn */
-    
+
     /* Brute-force search of all the shadows, by walking the hash */
     perfc_incr(shadow_mappings_bf);
     hash_foreach(v, callback_mask, callbacks, gmfn);
@@ -2437,8 +2437,8 @@ static int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
     /* If that didn't catch the mapping, something is very wrong */
     if ( !sh_check_page_has_no_refs(page) )
     {
-        /* Don't complain if we're in HVM and there are some extra mappings: 
-         * The qemu helper process has an untyped mapping of this dom's RAM 
+        /* Don't complain if we're in HVM and there are some extra mappings:
+         * The qemu helper process has an untyped mapping of this dom's RAM
          * and the HVM restore program takes another.
          * Also allow one typed refcount for xenheap pages, to match
          * share_xen_page_with_guest(). */
@@ -2448,7 +2448,7 @@ static int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
                    == !!is_xen_heap_page(page))) )
         {
             SHADOW_ERROR("can't find all mappings of mfn %lx: "
-                          "c=%08lx t=%08lx\n", mfn_x(gmfn), 
+                          "c=%08lx t=%08lx\n", mfn_x(gmfn),
                           page->count_info, page->u.inuse.type_info);
         }
     }
@@ -2475,7 +2475,7 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
     ASSERT(sp->u.sh.type > 0);
     ASSERT(sp->u.sh.type < SH_type_max_shadow);
     ASSERT(sh_type_has_up_pointer(v, sp->u.sh.type));
-    
+
     if (sp->up == 0) return 0;
     pmfn = _mfn(sp->up >> PAGE_SHIFT);
     ASSERT(mfn_valid(pmfn));
@@ -2483,7 +2483,7 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
     ASSERT(vaddr);
     vaddr += sp->up & (PAGE_SIZE-1);
     ASSERT(l1e_get_pfn(*(l1_pgentry_t *)vaddr) == mfn_x(smfn));
-    
+
     /* Is this the only reference to this shadow? */
     rc = (sp->u.sh.count == 1) ? 1 : 0;
 
@@ -2508,7 +2508,7 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
         break;
     default: BUG(); /* Some wierd unknown shadow type */
     }
-    
+
     sh_unmap_domain_page(vaddr);
     if ( rc )
         perfc_incr(shadow_up_pointer);
@@ -2519,8 +2519,8 @@ static int sh_remove_shadow_via_pointer(struct vcpu *v, mfn_t smfn)
 }
 
 void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
-/* Remove the shadows of this guest page.  
- * If fast != 0, just try the quick heuristic, which will remove 
+/* Remove the shadows of this guest page.
+ * If fast != 0, just try the quick heuristic, which will remove
  * at most one reference to each shadow of the page.  Otherwise, walk
  * all the shadow tables looking for refs to shadows of this gmfn.
  * If all != 0, kill the domain if we can't find all the shadows.
@@ -2530,7 +2530,7 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
     struct page_info *pg = mfn_to_page(gmfn);
     mfn_t smfn;
     unsigned char t;
-    
+
     /* Dispatch table for getting per-type functions: each level must
      * be called with the function to remove a lower-level shadow. */
     static const hash_callback_t callbacks[SH_type_unused] = {
@@ -2642,7 +2642,7 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
         domain_crash(v->domain);
     }
 
-    /* Need to flush TLBs now, so that linear maps are safe next time we 
+    /* Need to flush TLBs now, so that linear maps are safe next time we
      * take a fault. */
     flush_tlb_mask(v->domain->domain_dirty_cpumask);
 
@@ -2656,18 +2656,18 @@ sh_remove_all_shadows_and_parents(struct vcpu *v, mfn_t gmfn)
 {
     sh_remove_shadows(v, gmfn, 0, 1);
     /* XXX TODO:
-     * Rework this hashtable walker to return a linked-list of all 
-     * the shadows it modified, then do breadth-first recursion 
-     * to find the way up to higher-level tables and unshadow them too. 
+     * Rework this hashtable walker to return a linked-list of all
+     * the shadows it modified, then do breadth-first recursion
+     * to find the way up to higher-level tables and unshadow them too.
      *
      * The current code (just tearing down each page's shadows as we
-     * detect that it is not a pagetable) is correct, but very slow. 
+     * detect that it is not a pagetable) is correct, but very slow.
      * It means extra emulated writes and slows down removal of mappings. */
 }
 
 /**************************************************************************/
 
-/* Reset the up-pointers of every L3 shadow to 0. 
+/* Reset the up-pointers of every L3 shadow to 0.
  * This is called when l3 shadows stop being pinnable, to clear out all
  * the list-head bits so the up-pointer field is properly inititalised. */
 static int sh_clear_up_pointer(struct vcpu *v, mfn_t smfn, mfn_t unused)
@@ -2711,7 +2711,7 @@ static void sh_update_paging_modes(struct vcpu *v)
 
     ASSERT(paging_locked_by_me(d));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
     /* Make sure this vcpu has a virtual TLB array allocated */
     if ( unlikely(!v->arch.paging.vtlb) )
     {
@@ -2727,7 +2727,7 @@ static void sh_update_paging_modes(struct vcpu *v)
     }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     if ( mfn_x(v->arch.paging.shadow.oos_snapshot[0]) == INVALID_MFN )
     {
         int i;
@@ -2768,7 +2768,7 @@ static void sh_update_paging_modes(struct vcpu *v)
         ASSERT(shadow_mode_translate(d));
         ASSERT(shadow_mode_external(d));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         /* Need to resync all our pages now, because if a page goes out
          * of sync with paging enabled and is resynced with paging
          * disabled, the resync will go wrong. */
@@ -2827,7 +2827,7 @@ static void sh_update_paging_modes(struct vcpu *v)
                 /* Need to make a new monitor table for the new mode */
                 mfn_t new_mfn, old_mfn;
 
-                if ( v != current && vcpu_runnable(v) ) 
+                if ( v != current && vcpu_runnable(v) )
                 {
                     SHADOW_ERROR("Some third party (d=%u v=%u) is changing "
                                  "this HVM vcpu's (d=%u v=%u) paging mode "
@@ -2847,7 +2847,7 @@ static void sh_update_paging_modes(struct vcpu *v)
                 SHADOW_PRINTK("new monitor table %"PRI_mfn "\n",
                                mfn_x(new_mfn));
 
-                /* Don't be running on the old monitor table when we 
+                /* Don't be running on the old monitor table when we
                  * pull it down!  Switch CR3, and warn the HVM code that
                  * its host cr3 has changed. */
                 make_cr3(v, mfn_x(new_mfn));
@@ -2914,9 +2914,9 @@ static void sh_new_mode(struct domain *d, u32 new_mode)
 int shadow_enable(struct domain *d, u32 mode)
 /* Turn on "permanent" shadow features: external, translate, refcount.
  * Can only be called once on a domain, and these features cannot be
- * disabled. 
+ * disabled.
  * Returns 0 for success, -errno for failure. */
-{    
+{
     unsigned int old_pages;
     struct page_info *pg = NULL;
     uint32_t *e;
@@ -2942,14 +2942,14 @@ int shadow_enable(struct domain *d, u32 mode)
     if ( old_pages == 0 )
     {
         unsigned int r;
-        paging_lock(d);                
+        paging_lock(d);
         r = sh_set_allocation(d, 1024, NULL); /* Use at least 4MB */
         if ( r != 0 )
         {
             sh_set_allocation(d, 0, NULL);
             rv = -ENOMEM;
             goto out_locked;
-        }        
+        }
         paging_unlock(d);
     }
 
@@ -2957,7 +2957,7 @@ int shadow_enable(struct domain *d, u32 mode)
     d->arch.paging.alloc_page = shadow_alloc_p2m_page;
     d->arch.paging.free_page = shadow_free_p2m_page;
 
-    /* Init the P2M table.  Must be done before we take the paging lock 
+    /* Init the P2M table.  Must be done before we take the paging lock
      * to avoid possible deadlock. */
     if ( mode & PG_translate )
     {
@@ -2970,7 +2970,7 @@ int shadow_enable(struct domain *d, u32 mode)
      * have paging disabled */
     if ( is_hvm_domain(d) )
     {
-        /* Get a single page from the shadow pool.  Take it via the 
+        /* Get a single page from the shadow pool.  Take it via the
          * P2M interface to make freeing it simpler afterwards. */
         pg = shadow_alloc_p2m_page(d);
         if ( pg == NULL )
@@ -2979,11 +2979,11 @@ int shadow_enable(struct domain *d, u32 mode)
             goto out_unlocked;
         }
         /* Fill it with 32-bit, non-PAE superpage entries, each mapping 4MB
-         * of virtual address space onto the same physical address range */ 
+         * of virtual address space onto the same physical address range */
         e = __map_domain_page(pg);
         for ( i = 0; i < PAGE_SIZE / sizeof(*e); i++ )
             e[i] = ((0x400000U * i)
-                    | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER 
+                    | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER
                     | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
         sh_unmap_domain_page(e);
         pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
@@ -3005,8 +3005,8 @@ int shadow_enable(struct domain *d, u32 mode)
         goto out_locked;
     }
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) 
-    /* We assume we're dealing with an older 64bit linux guest until we 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
+    /* We assume we're dealing with an older 64bit linux guest until we
      * see the guest use more than one l4 per vcpu. */
     d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
 #endif
@@ -3073,7 +3073,7 @@ void shadow_teardown(struct domain *d)
         }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         {
             int i;
             mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
@@ -3093,24 +3093,24 @@ void shadow_teardown(struct domain *d)
         SHADOW_PRINTK("teardown of domain %u starts."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
                        d->domain_id,
-                       d->arch.paging.shadow.total_pages, 
-                       d->arch.paging.shadow.free_pages, 
+                       d->arch.paging.shadow.total_pages,
+                       d->arch.paging.shadow.free_pages,
                        d->arch.paging.shadow.p2m_pages);
         /* Destroy all the shadows and release memory to domheap */
         sh_set_allocation(d, 0, NULL);
         /* Release the hash table back to xenheap */
-        if (d->arch.paging.shadow.hash_table) 
+        if (d->arch.paging.shadow.hash_table)
             shadow_hash_teardown(d);
         /* Should not have any more memory held */
         SHADOW_PRINTK("teardown done."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
-                       d->arch.paging.shadow.total_pages, 
-                       d->arch.paging.shadow.free_pages, 
+                       d->arch.paging.shadow.total_pages,
+                       d->arch.paging.shadow.free_pages,
                        d->arch.paging.shadow.p2m_pages);
         ASSERT(d->arch.paging.shadow.total_pages == 0);
     }
 
-    /* Free the non-paged-vcpus pagetable; must happen after we've 
+    /* Free the non-paged-vcpus pagetable; must happen after we've
      * destroyed any shadows of it or sh_destroy_shadow will get confused. */
     if ( !pagetable_is_null(d->arch.paging.shadow.unpaged_pagetable) )
     {
@@ -3120,7 +3120,7 @@ void shadow_teardown(struct domain *d)
             if ( !hvm_paging_enabled(v) )
                 v->arch.guest_table = pagetable_null();
         }
-        unpaged_pagetable = 
+        unpaged_pagetable =
             pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable);
         d->arch.paging.shadow.unpaged_pagetable = pagetable_null();
     }
@@ -3140,7 +3140,7 @@ void shadow_teardown(struct domain *d)
     paging_unlock(d);
 
     /* Must be called outside the lock */
-    if ( unpaged_pagetable ) 
+    if ( unpaged_pagetable )
         shadow_free_p2m_page(d, unpaged_pagetable);
 }
 
@@ -3150,11 +3150,11 @@ void shadow_final_teardown(struct domain *d)
     SHADOW_PRINTK("dom %u final teardown starts."
                    "  Shadow pages total = %u, free = %u, p2m=%u\n",
                    d->domain_id,
-                   d->arch.paging.shadow.total_pages, 
-                   d->arch.paging.shadow.free_pages, 
+                   d->arch.paging.shadow.total_pages,
+                   d->arch.paging.shadow.free_pages,
                    d->arch.paging.shadow.p2m_pages);
 
-    /* Double-check that the domain didn't have any shadow memory.  
+    /* Double-check that the domain didn't have any shadow memory.
      * It is possible for a domain that never got domain_kill()ed
      * to get here with its shadow allocation intact. */
     if ( d->arch.paging.shadow.total_pages != 0 )
@@ -3168,8 +3168,8 @@ void shadow_final_teardown(struct domain *d)
     SHADOW_PRINTK("dom %u final teardown done."
                    "  Shadow pages total = %u, free = %u, p2m=%u\n",
                    d->domain_id,
-                   d->arch.paging.shadow.total_pages, 
-                   d->arch.paging.shadow.free_pages, 
+                   d->arch.paging.shadow.total_pages,
+                   d->arch.paging.shadow.free_pages,
                    d->arch.paging.shadow.p2m_pages);
     paging_unlock(d);
 }
@@ -3214,7 +3214,7 @@ static int shadow_one_bit_enable(struct domain *d, u32 mode)
     return 0;
 }
 
-static int shadow_one_bit_disable(struct domain *d, u32 mode) 
+static int shadow_one_bit_disable(struct domain *d, u32 mode)
 /* Turn off a single shadow mode feature */
 {
     struct vcpu *v;
@@ -3234,8 +3234,8 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode)
         SHADOW_PRINTK("un-shadowing of domain %u starts."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
                        d->domain_id,
-                       d->arch.paging.shadow.total_pages, 
-                       d->arch.paging.shadow.free_pages, 
+                       d->arch.paging.shadow.total_pages,
+                       d->arch.paging.shadow.free_pages,
                        d->arch.paging.shadow.p2m_pages);
         for_each_vcpu(d, v)
         {
@@ -3246,7 +3246,7 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode)
             else
                 make_cr3(v, pagetable_get_pfn(v->arch.guest_table));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
             {
                 int i;
                 mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
@@ -3267,8 +3267,8 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode)
         SHADOW_PRINTK("un-shadowing of domain %u done."
                        "  Shadow pages total = %u, free = %u, p2m=%u\n",
                        d->domain_id,
-                       d->arch.paging.shadow.total_pages, 
-                       d->arch.paging.shadow.free_pages, 
+                       d->arch.paging.shadow.total_pages,
+                       d->arch.paging.shadow.free_pages,
                        d->arch.paging.shadow.p2m_pages);
     }
 
@@ -3306,7 +3306,7 @@ static int shadow_test_disable(struct domain *d)
 /* P2M map manipulations */
 
 /* shadow specific code which should be called when P2M table entry is updated
- * with new content. It is responsible for update the entry, as well as other 
+ * with new content. It is responsible for update the entry, as well as other
  * shadow processing jobs.
  */
 
@@ -3329,7 +3329,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
     {
         mfn_t mfn = _mfn(l1e_get_pfn(*p));
         p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
-        if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) ) 
+        if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
         {
             sh_remove_all_shadows_and_parents(v, mfn);
             if ( sh_remove_all_mappings(v, mfn) )
@@ -3337,8 +3337,8 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
         }
     }
 
-    /* If we're removing a superpage mapping from the p2m, we need to check 
-     * all the pages covered by it.  If they're still there in the new 
+    /* If we're removing a superpage mapping from the p2m, we need to check
+     * all the pages covered by it.  If they're still there in the new
      * scheme, that's OK, but otherwise they must be unshadowed. */
     if ( level == 2 && (l1e_get_flags(*p) & _PAGE_PRESENT) &&
          (l1e_get_flags(*p) & _PAGE_PSE) )
@@ -3355,13 +3355,13 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
 
             /* If we're replacing a superpage with a normal L1 page, map it */
             if ( (l1e_get_flags(new) & _PAGE_PRESENT)
-                 && !(l1e_get_flags(new) & _PAGE_PSE) 
+                 && !(l1e_get_flags(new) & _PAGE_PSE)
                  && mfn_valid(nmfn) )
                 npte = map_domain_page(mfn_x(nmfn));
-            
+
             for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
             {
-                if ( !npte 
+                if ( !npte
                      || !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i])))
                      || l1e_get_pfn(npte[i]) != mfn_x(omfn) )
                 {
@@ -3374,7 +3374,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
                 omfn = _mfn(mfn_x(omfn) + 1);
             }
             flush_tlb_mask(&flushmask);
-            
+
             if ( npte )
                 unmap_domain_page(npte);
         }
@@ -3389,7 +3389,7 @@ shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
     paging_lock(d);
 
     /* If there are any shadows, update them.  But if shadow_teardown()
-     * has already been called then it's not safe to try. */ 
+     * has already been called then it's not safe to try. */
     if ( likely(d->arch.paging.shadow.total_pages != 0) )
          sh_unshadow_for_p2m_change(d, gfn, p, new, level);
 
@@ -3426,8 +3426,8 @@ static int sh_enable_log_dirty(struct domain *d, bool_t log_global)
     paging_lock(d);
     if ( shadow_mode_enabled(d) )
     {
-        /* This domain already has some shadows: need to clear them out 
-         * of the way to make sure that all references to guest memory are 
+        /* This domain already has some shadows: need to clear them out
+         * of the way to make sure that all references to guest memory are
          * properly write-protected */
         shadow_blow_tables(d);
     }
@@ -3439,7 +3439,7 @@ static int sh_enable_log_dirty(struct domain *d, bool_t log_global)
     if ( is_pv_32on64_domain(d) )
         d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
 #endif
-    
+
     ret = shadow_one_bit_enable(d, PG_log_dirty);
     paging_unlock(d);
 
@@ -3454,12 +3454,12 @@ static int sh_disable_log_dirty(struct domain *d)
     paging_lock(d);
     ret = shadow_one_bit_disable(d, PG_log_dirty);
     paging_unlock(d);
-    
+
     return ret;
 }
 
-/* This function is called when we CLEAN log dirty bitmap. See 
- * paging_log_dirty_op() for details. 
+/* This function is called when we CLEAN log dirty bitmap. See
+ * paging_log_dirty_op() for details.
  */
 static void sh_clean_dirty_bitmap(struct domain *d)
 {
@@ -3519,7 +3519,7 @@ int shadow_track_dirty_vram(struct domain *d,
      * no need to be careful. */
     if ( !dirty_vram )
     {
-        /* Throw away all the shadows rather than walking through them 
+        /* Throw away all the shadows rather than walking through them
          * up to nr times getting rid of mappings of each pfn */
         shadow_blow_tables(d);
 
@@ -3665,7 +3665,7 @@ out:
 /**************************************************************************/
 /* Shadow-control XEN_DOMCTL dispatcher */
 
-int shadow_domctl(struct domain *d, 
+int shadow_domctl(struct domain *d,
                   xen_domctl_shadow_op_t *sc,
                   XEN_GUEST_HANDLE_PARAM(void) u_domctl)
 {
@@ -3675,7 +3675,7 @@ int shadow_domctl(struct domain *d,
     {
     case XEN_DOMCTL_SHADOW_OP_OFF:
         if ( d->arch.paging.mode == PG_SH_enable )
-            if ( (rc = shadow_test_disable(d)) != 0 ) 
+            if ( (rc = shadow_test_disable(d)) != 0 )
                 return rc;
         return 0;
 
@@ -3695,7 +3695,7 @@ int shadow_domctl(struct domain *d,
     case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
         paging_lock(d);
         if ( sc->mb == 0 && shadow_mode_enabled(d) )
-        {            
+        {
             /* Can't set the allocation to zero unless the domain stops using
              * shadow pagetables first */
             SHADOW_ERROR("Can't set shadow allocation to zero, domain %u"
@@ -3709,7 +3709,7 @@ int shadow_domctl(struct domain *d,
             /* Not finished.  Set up to re-run the call. */
             rc = hypercall_create_continuation(
                 __HYPERVISOR_domctl, "h", u_domctl);
-        else 
+        else
             /* Finished.  Return the new allocation */
             sc->mb = shadow_get_allocation(d);
         return rc;
@@ -3726,7 +3726,7 @@ int shadow_domctl(struct domain *d,
 
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_FULL
 
-void shadow_audit_tables(struct vcpu *v) 
+void shadow_audit_tables(struct vcpu *v)
 {
     /* Dispatch table for getting per-type functions */
     static const hash_callback_t callbacks[SH_type_unused] = {
@@ -3746,7 +3746,7 @@ void shadow_audit_tables(struct vcpu *v)
         SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4),  /* l4_64   */
         NULL  /* All the rest */
     };
-    unsigned int mask; 
+    unsigned int mask;
 
     if ( !(SHADOW_AUDIT_ENABLE) )
         return;
@@ -3765,7 +3765,7 @@ void shadow_audit_tables(struct vcpu *v)
         case 2: mask = (SHF_L1_32|SHF_FL1_32|SHF_L2_32); break;
         case 3: mask = (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE
                         |SHF_L2H_PAE); break;
-        case 4: mask = (SHF_L1_64|SHF_FL1_64|SHF_L2_64  
+        case 4: mask = (SHF_L1_64|SHF_FL1_64|SHF_L2_64
                         |SHF_L3_64|SHF_L4_64); break;
         default: BUG();
         }
@@ -3782,5 +3782,5 @@ void shadow_audit_tables(struct vcpu *v)
  * c-file-style: "BSD"
  * c-basic-offset: 4
  * indent-tabs-mode: nil
- * End: 
+ * End:
  */
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 5fc10c9..434df61 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1,7 +1,7 @@
 /******************************************************************************
  * arch/x86/mm/shadow/multi.c
  *
- * Simple, mostly-synchronous shadow page tables. 
+ * Simple, mostly-synchronous shadow page tables.
  * Parts of this code are Copyright (c) 2006 by XenSource Inc.
  * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
  * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
@@ -43,25 +43,25 @@
 #include "types.h"
 
 /* THINGS TO DO LATER:
- * 
+ *
  * TEARDOWN HEURISTICS
- * Also: have a heuristic for when to destroy a previous paging-mode's 
+ * Also: have a heuristic for when to destroy a previous paging-mode's
  * shadows.  When a guest is done with its start-of-day 32-bit tables
- * and reuses the memory we want to drop those shadows.  Start with 
- * shadows in a page in two modes as a hint, but beware of clever tricks 
+ * and reuses the memory we want to drop those shadows.  Start with
+ * shadows in a page in two modes as a hint, but beware of clever tricks
  * like reusing a pagetable for both PAE and 64-bit during boot...
  *
  * PAE LINEAR MAPS
  * Rework shadow_get_l*e() to have the option of using map_domain_page()
- * instead of linear maps.  Add appropriate unmap_l*e calls in the users. 
- * Then we can test the speed difference made by linear maps.  If the 
- * map_domain_page() version is OK on PAE, we could maybe allow a lightweight 
- * l3-and-l2h-only shadow mode for PAE PV guests that would allow them 
- * to share l2h pages again. 
+ * instead of linear maps.  Add appropriate unmap_l*e calls in the users.
+ * Then we can test the speed difference made by linear maps.  If the
+ * map_domain_page() version is OK on PAE, we could maybe allow a lightweight
+ * l3-and-l2h-only shadow mode for PAE PV guests that would allow them
+ * to share l2h pages again.
  *
  * PSE disabled / PSE36
  * We don't support any modes other than PSE enabled, PSE36 disabled.
- * Neither of those would be hard to change, but we'd need to be able to 
+ * Neither of those would be hard to change, but we'd need to be able to
  * deal with shadows made in one mode and used in another.
  */
 
@@ -90,7 +90,7 @@ static char *fetch_type_names[] = {
  *              shadow L1 which maps its "splinters".
  */
 
-static inline mfn_t 
+static inline mfn_t
 get_fl1_shadow_status(struct vcpu *v, gfn_t gfn)
 /* Look for FL1 shadows in the hash table */
 {
@@ -99,7 +99,7 @@ get_fl1_shadow_status(struct vcpu *v, gfn_t gfn)
     return smfn;
 }
 
-static inline mfn_t 
+static inline mfn_t
 get_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
 /* Look for shadows in the hash table */
 {
@@ -109,7 +109,7 @@ get_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
     return smfn;
 }
 
-static inline void 
+static inline void
 set_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
 /* Put an FL1 shadow into the hash table */
 {
@@ -120,7 +120,7 @@ set_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
     shadow_hash_insert(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
 }
 
-static inline void 
+static inline void
 set_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
 /* Put a shadow into the hash table */
 {
@@ -143,7 +143,7 @@ set_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
     shadow_hash_insert(v, mfn_x(gmfn), shadow_type, smfn);
 }
 
-static inline void 
+static inline void
 delete_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
 /* Remove a shadow from the hash table */
 {
@@ -153,7 +153,7 @@ delete_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
     shadow_hash_delete(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
 }
 
-static inline void 
+static inline void
 delete_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
 /* Remove a shadow from the hash table */
 {
@@ -172,10 +172,10 @@ delete_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
 /* Functions for walking the guest page tables */
 
 static inline uint32_t
-sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw, 
+sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw,
                      uint32_t pfec)
 {
-    return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec, 
+    return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec,
 #if GUEST_PAGING_LEVELS == 3 /* PAE */
                              _mfn(INVALID_MFN),
                              v->arch.paging.shadow.gl3e
@@ -323,7 +323,7 @@ gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw)
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
 /* Lightweight audit: pass all the shadows associated with this guest walk
  * through the audit mechanisms */
-static void sh_audit_gw(struct vcpu *v, walk_t *gw) 
+static void sh_audit_gw(struct vcpu *v, walk_t *gw)
 {
     mfn_t smfn;
 
@@ -332,32 +332,32 @@ static void sh_audit_gw(struct vcpu *v, walk_t *gw)
 
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
     if ( mfn_valid(gw->l4mfn)
-         && mfn_valid((smfn = get_shadow_status(v, gw->l4mfn, 
+         && mfn_valid((smfn = get_shadow_status(v, gw->l4mfn,
                                                 SH_type_l4_shadow))) )
         (void) sh_audit_l4_table(v, smfn, _mfn(INVALID_MFN));
     if ( mfn_valid(gw->l3mfn)
-         && mfn_valid((smfn = get_shadow_status(v, gw->l3mfn, 
+         && mfn_valid((smfn = get_shadow_status(v, gw->l3mfn,
                                                 SH_type_l3_shadow))) )
         (void) sh_audit_l3_table(v, smfn, _mfn(INVALID_MFN));
 #endif /* PAE or 64... */
     if ( mfn_valid(gw->l2mfn) )
     {
-        if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn, 
+        if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
                                                  SH_type_l2_shadow))) )
             (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
 #if GUEST_PAGING_LEVELS == 3
-        if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn, 
+        if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
                                                  SH_type_l2h_shadow))) )
             (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
 #endif
     }
     if ( mfn_valid(gw->l1mfn)
-         && mfn_valid((smfn = get_shadow_status(v, gw->l1mfn, 
+         && mfn_valid((smfn = get_shadow_status(v, gw->l1mfn,
                                                 SH_type_l1_shadow))) )
         (void) sh_audit_l1_table(v, smfn, _mfn(INVALID_MFN));
     else if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PRESENT)
               && (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)
-              && mfn_valid( 
+              && mfn_valid(
               (smfn = get_fl1_shadow_status(v, guest_l2e_get_gfn(gw->l2e)))) )
         (void) sh_audit_fl1_table(v, smfn, _mfn(INVALID_MFN));
 }
@@ -376,11 +376,11 @@ sh_guest_map_l1e(struct vcpu *v, unsigned long addr,
     walk_t gw;
 
     ASSERT(shadow_mode_translate(v->domain));
-        
+
     // XXX -- this is expensive, but it's easy to cobble together...
     // FIXME!
 
-    if ( sh_walk_guest_tables(v, addr, &gw, PFEC_page_present) == 0 
+    if ( sh_walk_guest_tables(v, addr, &gw, PFEC_page_present) == 0
          && mfn_valid(gw.l1mfn) )
     {
         if ( gl1mfn )
@@ -398,7 +398,7 @@ sh_guest_get_eff_l1e(struct vcpu *v, unsigned long addr, void *eff_l1e)
     walk_t gw;
 
     ASSERT(shadow_mode_translate(v->domain));
-        
+
     // XXX -- this is expensive, but it's easy to cobble together...
     // FIXME!
 
@@ -506,12 +506,12 @@ shadow_l4_index(mfn_t *smfn, u32 guest_index)
  */
 
 static always_inline void
-_sh_propagate(struct vcpu *v, 
+_sh_propagate(struct vcpu *v,
               guest_intpte_t guest_intpte,
-              mfn_t target_mfn, 
+              mfn_t target_mfn,
               void *shadow_entry_ptr,
               int level,
-              fetch_type_t ft, 
+              fetch_type_t ft,
               p2m_type_t p2mt)
 {
     guest_l1e_t guest_entry = { guest_intpte };
@@ -537,11 +537,11 @@ _sh_propagate(struct vcpu *v,
     if ( unlikely(!(gflags & _PAGE_PRESENT)) )
     {
 #if !(SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
-        /* If a guest l1 entry is not present, shadow with the magic 
+        /* If a guest l1 entry is not present, shadow with the magic
          * guest-not-present entry. */
         if ( level == 1 )
             *sp = sh_l1e_gnp();
-        else 
+        else
 #endif /* !OOS */
             *sp = shadow_l1e_empty();
         goto done;
@@ -562,7 +562,7 @@ _sh_propagate(struct vcpu *v,
     // return early.
     //
     if ( !mfn_valid(target_mfn)
-         && !(level == 1 && (!shadow_mode_refcounts(d) 
+         && !(level == 1 && (!shadow_mode_refcounts(d)
                              || p2mt == p2m_mmio_direct)) )
     {
         ASSERT((ft == ft_prefetch));
@@ -595,7 +595,7 @@ _sh_propagate(struct vcpu *v,
         ASSERT(!(sflags & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)));
 
         /* compute the PAT index for shadow page entry when VT-d is enabled
-         * and device assigned. 
+         * and device assigned.
          * 1) direct MMIO: compute the PAT index with gMTRR=UC and gPAT.
          * 2) if enables snoop control, compute the PAT index as WB.
          * 3) if disables snoop control, compute the PAT index with
@@ -613,7 +613,7 @@ _sh_propagate(struct vcpu *v,
                             gflags,
                             gfn_to_paddr(target_gfn),
                             pfn_to_paddr(mfn_x(target_mfn)),
-                            MTRR_TYPE_UNCACHABLE); 
+                            MTRR_TYPE_UNCACHABLE);
                 else if ( iommu_snoop )
                     sflags |= pat_type_2_pte_flags(PAT_TYPE_WRBACK);
                 else
@@ -654,12 +654,12 @@ _sh_propagate(struct vcpu *v,
     // Only allow the guest write access to a page a) on a demand fault,
     // or b) if the page is already marked as dirty.
     //
-    // (We handle log-dirty entirely inside the shadow code, without using the 
+    // (We handle log-dirty entirely inside the shadow code, without using the
     // p2m_ram_logdirty p2m type: only HAP uses that.)
     if ( unlikely((level == 1) && shadow_mode_log_dirty(d)) )
     {
         if ( mfn_valid(target_mfn) ) {
-            if ( ft & FETCH_TYPE_WRITE ) 
+            if ( ft & FETCH_TYPE_WRITE )
                 paging_mark_dirty(d, mfn_x(target_mfn));
             else if ( !paging_mfn_is_dirty(d, target_mfn) )
                 sflags &= ~_PAGE_RW;
@@ -682,10 +682,10 @@ _sh_propagate(struct vcpu *v,
          (p2mt == p2m_mmio_direct &&
           rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn))) )
         sflags &= ~_PAGE_RW;
-    
+
     // protect guest page tables
     //
-    if ( unlikely((level == 1) 
+    if ( unlikely((level == 1)
                   && sh_mfn_is_a_page_table(target_mfn)
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
                   /* Unless the page is out of sync and the guest is
@@ -699,7 +699,7 @@ _sh_propagate(struct vcpu *v,
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d) 
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
          && is_pv_domain(d) )
     {
         sflags |= _PAGE_USER;
@@ -720,7 +720,7 @@ _sh_propagate(struct vcpu *v,
 
 #if GUEST_PAGING_LEVELS >= 4
 static void
-l4e_propagate_from_guest(struct vcpu *v, 
+l4e_propagate_from_guest(struct vcpu *v,
                          guest_l4e_t gl4e,
                          mfn_t sl3mfn,
                          shadow_l4e_t *sl4e,
@@ -732,7 +732,7 @@ l4e_propagate_from_guest(struct vcpu *v,
 static void
 l3e_propagate_from_guest(struct vcpu *v,
                          guest_l3e_t gl3e,
-                         mfn_t sl2mfn, 
+                         mfn_t sl2mfn,
                          shadow_l3e_t *sl3e,
                          fetch_type_t ft)
 {
@@ -741,7 +741,7 @@ l3e_propagate_from_guest(struct vcpu *v,
 #endif // GUEST_PAGING_LEVELS >= 4
 
 static void
-l2e_propagate_from_guest(struct vcpu *v, 
+l2e_propagate_from_guest(struct vcpu *v,
                          guest_l2e_t gl2e,
                          mfn_t sl1mfn,
                          shadow_l2e_t *sl2e,
@@ -751,11 +751,11 @@ l2e_propagate_from_guest(struct vcpu *v,
 }
 
 static void
-l1e_propagate_from_guest(struct vcpu *v, 
+l1e_propagate_from_guest(struct vcpu *v,
                          guest_l1e_t gl1e,
-                         mfn_t gmfn, 
+                         mfn_t gmfn,
                          shadow_l1e_t *sl1e,
-                         fetch_type_t ft, 
+                         fetch_type_t ft,
                          p2m_type_t p2mt)
 {
     _sh_propagate(v, gl1e.l1, gmfn, sl1e, 1, ft, p2mt);
@@ -768,10 +768,10 @@ l1e_propagate_from_guest(struct vcpu *v,
  * functions which ever write (non-zero) data onto a shadow page.
  */
 
-static inline void safe_write_entry(void *dst, void *src) 
+static inline void safe_write_entry(void *dst, void *src)
 /* Copy one PTE safely when processors might be running on the
  * destination pagetable.   This does *not* give safety against
- * concurrent writes (that's what the paging lock is for), just 
+ * concurrent writes (that's what the paging lock is for), just
  * stops the hardware picking up partially written entries. */
 {
     volatile unsigned long *d = dst;
@@ -784,7 +784,7 @@ static inline void safe_write_entry(void *dst, void *src)
 }
 
 
-static inline void 
+static inline void
 shadow_write_entries(void *d, void *s, int entries, mfn_t mfn)
 /* This function does the actual writes to shadow pages.
  * It must not be called directly, since it doesn't do the bookkeeping
@@ -797,10 +797,10 @@ shadow_write_entries(void *d, void *s, int entries, mfn_t mfn)
 
     /* Because we mirror access rights at all levels in the shadow, an
      * l2 (or higher) entry with the RW bit cleared will leave us with
-     * no write access through the linear map.  
-     * We detect that by writing to the shadow with copy_to_user() and 
+     * no write access through the linear map.
+     * We detect that by writing to the shadow with copy_to_user() and
      * using map_domain_page() to get a writeable mapping if we need to. */
-    if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 ) 
+    if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 )
     {
         perfc_incr(shadow_linear_map_failed);
         map = sh_map_domain_page(mfn);
@@ -874,7 +874,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d, p2m_type_t type)
 
 static void inline
 shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d)
-{ 
+{
     if ( !shadow_mode_refcounts(d) )
         return;
 
@@ -882,9 +882,9 @@ shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d)
 }
 
 #if GUEST_PAGING_LEVELS >= 4
-static int shadow_set_l4e(struct vcpu *v, 
-                          shadow_l4e_t *sl4e, 
-                          shadow_l4e_t new_sl4e, 
+static int shadow_set_l4e(struct vcpu *v,
+                          shadow_l4e_t *sl4e,
+                          shadow_l4e_t new_sl4e,
                           mfn_t sl4mfn)
 {
     int flags = 0, ok;
@@ -894,13 +894,13 @@ static int shadow_set_l4e(struct vcpu *v,
     old_sl4e = *sl4e;
 
     if ( old_sl4e.l4 == new_sl4e.l4 ) return 0; /* Nothing to do */
-    
-    paddr = ((((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT) 
+
+    paddr = ((((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
              | (((unsigned long)sl4e) & ~PAGE_MASK));
 
-    if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT ) 
+    if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT )
     {
-        /* About to install a new reference */        
+        /* About to install a new reference */
         mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
         ok = sh_get_ref(v, sl3mfn, paddr);
         /* Are we pinning l3 shadows to handle wierd linux behaviour? */
@@ -917,12 +917,12 @@ static int shadow_set_l4e(struct vcpu *v,
     shadow_write_entries(sl4e, &new_sl4e, 1, sl4mfn);
     flags |= SHADOW_SET_CHANGED;
 
-    if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT ) 
+    if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT )
     {
         /* We lost a reference to an old mfn. */
         mfn_t osl3mfn = shadow_l4e_get_mfn(old_sl4e);
         if ( (mfn_x(osl3mfn) != mfn_x(shadow_l4e_get_mfn(new_sl4e)))
-             || !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e), 
+             || !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e),
                                           shadow_l4e_get_flags(new_sl4e)) )
         {
             flags |= SHADOW_SET_FLUSH;
@@ -932,9 +932,9 @@ static int shadow_set_l4e(struct vcpu *v,
     return flags;
 }
 
-static int shadow_set_l3e(struct vcpu *v, 
-                          shadow_l3e_t *sl3e, 
-                          shadow_l3e_t new_sl3e, 
+static int shadow_set_l3e(struct vcpu *v,
+                          shadow_l3e_t *sl3e,
+                          shadow_l3e_t new_sl3e,
                           mfn_t sl3mfn)
 {
     int flags = 0;
@@ -945,12 +945,12 @@ static int shadow_set_l3e(struct vcpu *v,
 
     if ( old_sl3e.l3 == new_sl3e.l3 ) return 0; /* Nothing to do */
 
-    paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT) 
+    paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
              | (((unsigned long)sl3e) & ~PAGE_MASK));
-    
+
     if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
     {
-        /* About to install a new reference */        
+        /* About to install a new reference */
         if ( !sh_get_ref(v, shadow_l3e_get_mfn(new_sl3e), paddr) )
         {
             domain_crash(v->domain);
@@ -962,13 +962,13 @@ static int shadow_set_l3e(struct vcpu *v,
     shadow_write_entries(sl3e, &new_sl3e, 1, sl3mfn);
     flags |= SHADOW_SET_CHANGED;
 
-    if ( shadow_l3e_get_flags(old_sl3e) & _PAGE_PRESENT ) 
+    if ( shadow_l3e_get_flags(old_sl3e) & _PAGE_PRESENT )
     {
         /* We lost a reference to an old mfn. */
         mfn_t osl2mfn = shadow_l3e_get_mfn(old_sl3e);
         if ( (mfn_x(osl2mfn) != mfn_x(shadow_l3e_get_mfn(new_sl3e))) ||
-             !perms_strictly_increased(shadow_l3e_get_flags(old_sl3e), 
-                                       shadow_l3e_get_flags(new_sl3e)) ) 
+             !perms_strictly_increased(shadow_l3e_get_flags(old_sl3e),
+                                       shadow_l3e_get_flags(new_sl3e)) )
         {
             flags |= SHADOW_SET_FLUSH;
         }
@@ -976,11 +976,11 @@ static int shadow_set_l3e(struct vcpu *v,
     }
     return flags;
 }
-#endif /* GUEST_PAGING_LEVELS >= 4 */ 
+#endif /* GUEST_PAGING_LEVELS >= 4 */
 
-static int shadow_set_l2e(struct vcpu *v, 
-                          shadow_l2e_t *sl2e, 
-                          shadow_l2e_t new_sl2e, 
+static int shadow_set_l2e(struct vcpu *v,
+                          shadow_l2e_t *sl2e,
+                          shadow_l2e_t new_sl2e,
                           mfn_t sl2mfn)
 {
     int flags = 0;
@@ -990,7 +990,7 @@ static int shadow_set_l2e(struct vcpu *v,
 #if GUEST_PAGING_LEVELS == 2
     /* In 2-on-3 we work with pairs of l2es pointing at two-page
      * shadows.  Reference counting and up-pointers track from the first
-     * page of the shadow to the first l2e, so make sure that we're 
+     * page of the shadow to the first l2e, so make sure that we're
      * working with those:
      * Start with a pair of identical entries */
     shadow_l2e_t pair[2] = { new_sl2e, new_sl2e };
@@ -1000,13 +1000,13 @@ static int shadow_set_l2e(struct vcpu *v,
 
     ASSERT(sl2e != NULL);
     old_sl2e = *sl2e;
-    
+
     if ( old_sl2e.l2 == new_sl2e.l2 ) return 0; /* Nothing to do */
-    
+
     paddr = ((((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
              | (((unsigned long)sl2e) & ~PAGE_MASK));
 
-    if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT ) 
+    if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
     {
         mfn_t sl1mfn = shadow_l2e_get_mfn(new_sl2e);
         ASSERT(mfn_to_page(sl1mfn)->u.sh.head);
@@ -1028,7 +1028,7 @@ static int shadow_set_l2e(struct vcpu *v,
                the GFN instead of the GMFN, and it's definitely not
                OOS. */
             if ( (sp->u.sh.type != SH_type_fl1_shadow) && mfn_valid(gl1mfn)
-                 && mfn_is_out_of_sync(gl1mfn) ) 
+                 && mfn_is_out_of_sync(gl1mfn) )
                 sh_resync(v, gl1mfn);
         }
 #endif
@@ -1047,13 +1047,13 @@ static int shadow_set_l2e(struct vcpu *v,
 #endif
     flags |= SHADOW_SET_CHANGED;
 
-    if ( shadow_l2e_get_flags(old_sl2e) & _PAGE_PRESENT ) 
+    if ( shadow_l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
     {
         /* We lost a reference to an old mfn. */
         mfn_t osl1mfn = shadow_l2e_get_mfn(old_sl2e);
         if ( (mfn_x(osl1mfn) != mfn_x(shadow_l2e_get_mfn(new_sl2e))) ||
-             !perms_strictly_increased(shadow_l2e_get_flags(old_sl2e), 
-                                       shadow_l2e_get_flags(new_sl2e)) ) 
+             !perms_strictly_increased(shadow_l2e_get_flags(old_sl2e),
+                                       shadow_l2e_get_flags(new_sl2e)) )
         {
             flags |= SHADOW_SET_FLUSH;
         }
@@ -1066,7 +1066,7 @@ static inline void shadow_vram_get_l1e(shadow_l1e_t new_sl1e,
                                        shadow_l1e_t *sl1e,
                                        mfn_t sl1mfn,
                                        struct domain *d)
-{ 
+{
     mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
     int flags = shadow_l1e_get_flags(new_sl1e);
     unsigned long gfn;
@@ -1085,7 +1085,7 @@ static inline void shadow_vram_get_l1e(shadow_l1e_t new_sl1e,
     {
         unsigned long i = gfn - dirty_vram->begin_pfn;
         struct page_info *page = mfn_to_page(mfn);
-        
+
         if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
             /* Initial guest reference, record it */
             dirty_vram->sl1ma[i] = pfn_to_paddr(mfn_x(sl1mfn))
@@ -1159,8 +1159,8 @@ static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e,
     }
 }
 
-static int shadow_set_l1e(struct vcpu *v, 
-                          shadow_l1e_t *sl1e, 
+static int shadow_set_l1e(struct vcpu *v,
+                          shadow_l1e_t *sl1e,
                           shadow_l1e_t new_sl1e,
                           p2m_type_t new_type,
                           mfn_t sl1mfn)
@@ -1179,15 +1179,15 @@ static int shadow_set_l1e(struct vcpu *v,
              == (_PAGE_RW|_PAGE_PRESENT)) )
         oos_fixup_add(v, new_gmfn, sl1mfn, pgentry_ptr_to_slot(sl1e));
 #endif
-    
+
     old_sl1e = *sl1e;
 
     if ( old_sl1e.l1 == new_sl1e.l1 ) return 0; /* Nothing to do */
-    
+
     if ( (shadow_l1e_get_flags(new_sl1e) & _PAGE_PRESENT)
-         && !sh_l1e_is_magic(new_sl1e) ) 
+         && !sh_l1e_is_magic(new_sl1e) )
     {
-        /* About to install a new reference */        
+        /* About to install a new reference */
         if ( shadow_mode_refcounts(d) ) {
             TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
             switch ( shadow_get_page_from_l1e(new_sl1e, d, new_type) )
@@ -1205,45 +1205,45 @@ static int shadow_set_l1e(struct vcpu *v,
                 break;
             }
         }
-    } 
+    }
 
     /* Write the new entry */
     shadow_write_entries(sl1e, &new_sl1e, 1, sl1mfn);
     flags |= SHADOW_SET_CHANGED;
 
-    if ( (shadow_l1e_get_flags(old_sl1e) & _PAGE_PRESENT) 
+    if ( (shadow_l1e_get_flags(old_sl1e) & _PAGE_PRESENT)
          && !sh_l1e_is_magic(old_sl1e) )
     {
         /* We lost a reference to an old mfn. */
-        /* N.B. Unlike higher-level sets, never need an extra flush 
-         * when writing an l1e.  Because it points to the same guest frame 
+        /* N.B. Unlike higher-level sets, never need an extra flush
+         * when writing an l1e.  Because it points to the same guest frame
          * as the guest l1e did, it's the guest's responsibility to
          * trigger a flush later. */
-        if ( shadow_mode_refcounts(d) ) 
+        if ( shadow_mode_refcounts(d) )
         {
             shadow_vram_put_l1e(old_sl1e, sl1e, sl1mfn, d);
             shadow_put_page_from_l1e(old_sl1e, d);
             TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_PUT_REF);
-        } 
+        }
     }
     return flags;
 }
 
 
 /**************************************************************************/
-/* Macros to walk pagetables.  These take the shadow of a pagetable and 
- * walk every "interesting" entry.  That is, they don't touch Xen mappings, 
- * and for 32-bit l2s shadowed onto PAE or 64-bit, they only touch every 
+/* Macros to walk pagetables.  These take the shadow of a pagetable and
+ * walk every "interesting" entry.  That is, they don't touch Xen mappings,
+ * and for 32-bit l2s shadowed onto PAE or 64-bit, they only touch every
  * second entry (since pairs of entries are managed together). For multi-page
  * shadows they walk all pages.
- * 
- * Arguments are an MFN, the variable to point to each entry, a variable 
- * to indicate that we are done (we will shortcut to the end of the scan 
+ *
+ * Arguments are an MFN, the variable to point to each entry, a variable
+ * to indicate that we are done (we will shortcut to the end of the scan
  * when _done != 0), a variable to indicate that we should avoid Xen mappings,
- * and the code. 
+ * and the code.
  *
- * WARNING: These macros have side-effects.  They change the values of both 
- * the pointer and the MFN. */ 
+ * WARNING: These macros have side-effects.  They change the values of both
+ * the pointer and the MFN. */
 
 static inline void increment_ptr_to_guest_entry(void *ptr)
 {
@@ -1288,7 +1288,7 @@ do {                                                                    \
 #define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)         \
        _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)
 #endif
-    
+
 
 #if GUEST_PAGING_LEVELS == 2
 
@@ -1335,7 +1335,7 @@ do {                                                                      \
     sh_unmap_domain_page(_sp);                                             \
 } while (0)
 
-#else 
+#else
 
 /* 64-bit l2: touch all entries except for PAE compat guests. */
 #define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)       \
@@ -1424,7 +1424,7 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
 
     sl4e = sh_map_domain_page(sl4mfn);
     BUILD_BUG_ON(sizeof (l4_pgentry_t) != sizeof (shadow_l4e_t));
-    
+
     /* Copy the common Xen mappings from the idle domain */
     slots = (shadow_mode_external(d)
              ? ROOT_PAGETABLE_XEN_SLOTS
@@ -1458,7 +1458,7 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
             shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
     }
 
-    sh_unmap_domain_page(sl4e);    
+    sh_unmap_domain_page(sl4e);
 }
 #endif
 
@@ -1504,12 +1504,12 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
         mfn_to_page(smfn)->up = 0;
 
 #if GUEST_PAGING_LEVELS == 4
-#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
     if ( shadow_type == SH_type_l4_64_shadow &&
          unlikely(v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL) )
     {
         /* We're shadowing a new l4, but we've been assuming the guest uses
-         * only one l4 per vcpu and context switches using an l4 entry. 
+         * only one l4 per vcpu and context switches using an l4 entry.
          * Count the number of active l4 shadows.  If there are enough
          * of them, decide that this isn't an old linux guest, and stop
          * pinning l3es.  This is not very quick but it doesn't happen
@@ -1522,9 +1522,9 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
             if ( sp->u.sh.type == SH_type_l4_64_shadow )
                 l4count++;
         }
-        for_each_vcpu ( v->domain, v2 ) 
+        for_each_vcpu ( v->domain, v2 )
             vcpus++;
-        if ( l4count > 2 * vcpus ) 
+        if ( l4count > 2 * vcpus )
         {
             /* Unpin all the pinned l3 tables, and don't pin any more. */
             page_list_for_each_safe(sp, t, &v->domain->arch.paging.shadow.pinned_shadows)
@@ -1542,7 +1542,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
     // Create the Xen mappings...
     if ( !shadow_mode_external(v->domain) )
     {
-        switch (shadow_type) 
+        switch (shadow_type)
         {
 #if GUEST_PAGING_LEVELS == 4
         case SH_type_l4_shadow:
@@ -1584,7 +1584,7 @@ sh_make_monitor_table(struct vcpu *v)
     struct domain *d = v->domain;
 
     ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
-    
+
     /* Guarantee we can get the memory we need */
     shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
 
@@ -1599,8 +1599,8 @@ sh_make_monitor_table(struct vcpu *v)
             mfn_t m3mfn, m2mfn;
             l4_pgentry_t *l4e;
             l3_pgentry_t *l3e;
-            /* Install an l3 table and an l2 table that will hold the shadow 
-             * linear map entries.  This overrides the linear map entry that 
+            /* Install an l3 table and an l2 table that will hold the shadow
+             * linear map entries.  This overrides the linear map entry that
              * was installed by sh_install_xen_entries_in_l4. */
             l4e = sh_map_domain_page(m4mfn);
 
@@ -1622,7 +1622,7 @@ sh_make_monitor_table(struct vcpu *v)
                 m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m3mfn)->shadow_flags = 3;
                 l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
-                
+
                 m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m2mfn)->shadow_flags = 2;
                 l3e = sh_map_domain_page(m3mfn);
@@ -1647,13 +1647,13 @@ sh_make_monitor_table(struct vcpu *v)
  * If the necessary tables are not present in the guest, they return NULL. */
 
 /* N.B. The use of GUEST_PAGING_LEVELS here is correct.  If the shadow has
- * more levels than the guest, the upper levels are always fixed and do not 
- * reflect any information from the guest, so we do not use these functions 
+ * more levels than the guest, the upper levels are always fixed and do not
+ * reflect any information from the guest, so we do not use these functions
  * to access them. */
 
 #if GUEST_PAGING_LEVELS >= 4
-static shadow_l4e_t * shadow_get_and_create_l4e(struct vcpu *v, 
-                                                walk_t *gw, 
+static shadow_l4e_t * shadow_get_and_create_l4e(struct vcpu *v,
+                                                walk_t *gw,
                                                 mfn_t *sl4mfn)
 {
     /* There is always a shadow of the top level table.  Get it. */
@@ -1662,8 +1662,8 @@ static shadow_l4e_t * shadow_get_and_create_l4e(struct vcpu *v,
     return sh_linear_l4_table(v) + shadow_l4_linear_offset(gw->va);
 }
 
-static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v, 
-                                                walk_t *gw, 
+static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v,
+                                                walk_t *gw,
                                                 mfn_t *sl3mfn,
                                                 fetch_type_t ft,
                                                 int *resync)
@@ -1674,18 +1674,18 @@ static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v,
     /* Get the l4e */
     sl4e = shadow_get_and_create_l4e(v, gw, &sl4mfn);
     ASSERT(sl4e != NULL);
-    if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT ) 
+    if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
     {
         *sl3mfn = shadow_l4e_get_mfn(*sl4e);
         ASSERT(mfn_valid(*sl3mfn));
-    } 
-    else 
+    }
+    else
     {
         int r;
         shadow_l4e_t new_sl4e;
         /* No l3 shadow installed: find and install it. */
         *sl3mfn = get_shadow_status(v, gw->l3mfn, SH_type_l3_shadow);
-        if ( !mfn_valid(*sl3mfn) ) 
+        if ( !mfn_valid(*sl3mfn) )
         {
             /* No l3 shadow of this page exists at all: make one. */
             *sl3mfn = sh_make_shadow(v, gw->l3mfn, SH_type_l3_shadow);
@@ -1708,8 +1708,8 @@ static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v,
 #endif /* GUEST_PAGING_LEVELS >= 4 */
 
 
-static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v, 
-                                                walk_t *gw, 
+static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
+                                                walk_t *gw,
                                                 mfn_t *sl2mfn,
                                                 fetch_type_t ft,
                                                 int *resync)
@@ -1720,13 +1720,13 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
     if ( !mfn_valid(gw->l2mfn) ) return NULL; /* No guest page. */
     /* Get the l3e */
     sl3e = shadow_get_and_create_l3e(v, gw, &sl3mfn, ft, resync);
-    if ( sl3e == NULL ) return NULL; 
-    if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT ) 
+    if ( sl3e == NULL ) return NULL;
+    if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
     {
         *sl2mfn = shadow_l3e_get_mfn(*sl3e);
         ASSERT(mfn_valid(*sl2mfn));
-    } 
-    else 
+    }
+    else
     {
         int r;
         shadow_l3e_t new_sl3e;
@@ -1740,7 +1740,7 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
 
         /* No l2 shadow installed: find and install it. */
         *sl2mfn = get_shadow_status(v, gw->l2mfn, t);
-        if ( !mfn_valid(*sl2mfn) ) 
+        if ( !mfn_valid(*sl2mfn) )
         {
             /* No l2 shadow of this page exists at all: make one. */
             *sl2mfn = sh_make_shadow(v, gw->l2mfn, t);
@@ -1750,7 +1750,7 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
         r = shadow_set_l3e(v, sl3e, new_sl3e, sl3mfn);
         ASSERT((r & SHADOW_SET_FLUSH) == 0);
         if ( r & SHADOW_SET_ERROR )
-            return NULL;        
+            return NULL;
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
         *resync |= 1;
@@ -1762,9 +1762,9 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
 #elif GUEST_PAGING_LEVELS == 3 /* PAE... */
     /* We never demand-shadow PAE l3es: they are only created in
      * sh_update_cr3().  Check if the relevant sl3e is present. */
-    shadow_l3e_t *sl3e = ((shadow_l3e_t *)&v->arch.paging.shadow.l3table) 
+    shadow_l3e_t *sl3e = ((shadow_l3e_t *)&v->arch.paging.shadow.l3table)
         + shadow_l3_linear_offset(gw->va);
-    if ( !(shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT) ) 
+    if ( !(shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT) )
         return NULL;
     *sl2mfn = shadow_l3e_get_mfn(*sl3e);
     ASSERT(mfn_valid(*sl2mfn));
@@ -1778,12 +1778,12 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
     (void) shadow_l2_index(sl2mfn, guest_l2_table_offset(gw->va));
     /* Reading the top level table is always valid. */
     return sh_linear_l2_table(v) + shadow_l2_linear_offset(gw->va);
-#endif 
+#endif
 }
 
 
-static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v, 
-                                                walk_t *gw, 
+static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
+                                                walk_t *gw,
                                                 mfn_t *sl1mfn,
                                                 fetch_type_t ft)
 {
@@ -1797,38 +1797,38 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
 
     /* Install the sl1 in the l2e if it wasn't there or if we need to
      * re-do it to fix a PSE dirty bit. */
-    if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT 
+    if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT
          && likely(ft != ft_demand_write
-                   || (shadow_l2e_get_flags(*sl2e) & _PAGE_RW) 
+                   || (shadow_l2e_get_flags(*sl2e) & _PAGE_RW)
                    || !(guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)) )
     {
         *sl1mfn = shadow_l2e_get_mfn(*sl2e);
         ASSERT(mfn_valid(*sl1mfn));
-    } 
-    else 
+    }
+    else
     {
         shadow_l2e_t new_sl2e;
         int r, flags = guest_l2e_get_flags(gw->l2e);
         /* No l1 shadow installed: find and install it. */
         if ( !(flags & _PAGE_PRESENT) )
             return NULL; /* No guest page. */
-        if ( guest_supports_superpages(v) && (flags & _PAGE_PSE) ) 
+        if ( guest_supports_superpages(v) && (flags & _PAGE_PSE) )
         {
             /* Splintering a superpage */
             gfn_t l2gfn = guest_l2e_get_gfn(gw->l2e);
             *sl1mfn = get_fl1_shadow_status(v, l2gfn);
-            if ( !mfn_valid(*sl1mfn) ) 
+            if ( !mfn_valid(*sl1mfn) )
             {
                 /* No fl1 shadow of this superpage exists at all: make one. */
                 *sl1mfn = make_fl1_shadow(v, l2gfn);
             }
-        } 
-        else 
+        }
+        else
         {
             /* Shadowing an actual guest l1 table */
             if ( !mfn_valid(gw->l1mfn) ) return NULL; /* No guest page. */
             *sl1mfn = get_shadow_status(v, gw->l1mfn, SH_type_l1_shadow);
-            if ( !mfn_valid(*sl1mfn) ) 
+            if ( !mfn_valid(*sl1mfn) )
             {
                 /* No l1 shadow of this page exists at all: make one. */
                 *sl1mfn = sh_make_shadow(v, gw->l1mfn, SH_type_l1_shadow);
@@ -1837,7 +1837,7 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
         /* Install the new sl1 table in the sl2e */
         l2e_propagate_from_guest(v, gw->l2e, *sl1mfn, &new_sl2e, ft);
         r = shadow_set_l2e(v, sl2e, new_sl2e, sl2mfn);
-        ASSERT((r & SHADOW_SET_FLUSH) == 0);        
+        ASSERT((r & SHADOW_SET_FLUSH) == 0);
         if ( r & SHADOW_SET_ERROR )
             return NULL;
 
@@ -1863,7 +1863,7 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
 
 
 /**************************************************************************/
-/* Destructors for shadow tables: 
+/* Destructors for shadow tables:
  * Unregister the shadow, decrement refcounts of any entries present in it,
  * and release the memory.
  *
@@ -1890,16 +1890,16 @@ void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
     /* Decrement refcounts of all the old entries */
-    sl4mfn = smfn; 
+    sl4mfn = smfn;
     SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
-        if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT ) 
+        if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
         {
             sh_put_ref(v, shadow_l4e_get_mfn(*sl4e),
-                       (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT) 
+                       (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
                        | ((unsigned long)sl4e & ~PAGE_MASK));
         }
     });
-    
+
     /* Put the memory back in the pool */
     shadow_free(v->domain, smfn);
 }
@@ -1922,11 +1922,11 @@ void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
     shadow_demote(v, gmfn, t);
 
     /* Decrement refcounts of all the old entries */
-    sl3mfn = smfn; 
+    sl3mfn = smfn;
     SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, 0, {
-        if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT ) 
+        if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
             sh_put_ref(v, shadow_l3e_get_mfn(*sl3e),
-                        (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT) 
+                        (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
                         | ((unsigned long)sl3e & ~PAGE_MASK));
     });
 
@@ -1961,9 +1961,9 @@ void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
     /* Decrement refcounts of all the old entries */
     sl2mfn = smfn;
     SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
-        if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT ) 
+        if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
             sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
-                        (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT) 
+                        (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
                         | ((unsigned long)sl2e & ~PAGE_MASK));
     });
 
@@ -1989,17 +1989,17 @@ void sh_destroy_l1_shadow(struct vcpu *v, mfn_t smfn)
         gfn_t gfn = _gfn(sp->v.sh.back);
         delete_fl1_shadow_status(v, gfn, smfn);
     }
-    else 
+    else
     {
         mfn_t gmfn = backpointer(sp);
         delete_shadow_status(v, gmfn, t, smfn);
         shadow_demote(v, gmfn, t);
     }
-    
+
     if ( shadow_mode_refcounts(d) )
     {
         /* Decrement refcounts of all the old entries */
-        mfn_t sl1mfn = smfn; 
+        mfn_t sl1mfn = smfn;
         SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, 0, {
             if ( (shadow_l1e_get_flags(*sl1e) & _PAGE_PRESENT)
                  && !sh_l1e_is_magic(*sl1e) ) {
@@ -2008,7 +2008,7 @@ void sh_destroy_l1_shadow(struct vcpu *v, mfn_t smfn)
             }
         });
     }
-    
+
     /* Put the memory back in the pool */
     shadow_free(v->domain, smfn);
 }
@@ -2025,8 +2025,8 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
         l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
         l3_pgentry_t *l3e;
         int linear_slot = shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
- 
-        /* Need to destroy the l3 and l2 monitor pages used 
+
+        /* Need to destroy the l3 and l2 monitor pages used
          * for the linear map */
         ASSERT(l4e_get_flags(l4e[linear_slot]) & _PAGE_PRESENT);
         m3mfn = _mfn(l4e_get_pfn(l4e[linear_slot]));
@@ -2060,18 +2060,18 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
 /**************************************************************************/
 /* Functions to destroy non-Xen mappings in a pagetable hierarchy.
  * These are called from common code when we are running out of shadow
- * memory, and unpinning all the top-level shadows hasn't worked. 
+ * memory, and unpinning all the top-level shadows hasn't worked.
  *
  * With user_only == 1, we leave guest kernel-mode mappings in place too,
  * unhooking only the user-mode mappings
  *
- * This implementation is pretty crude and slow, but we hope that it won't 
+ * This implementation is pretty crude and slow, but we hope that it won't
  * be called very often. */
 
 #if GUEST_PAGING_LEVELS == 2
 
 void sh_unhook_32b_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
-{    
+{
     shadow_l2e_t *sl2e;
     SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
         if ( !user_only || (sl2e->l2 & _PAGE_USER) )
@@ -2109,7 +2109,7 @@ void sh_unhook_64b_mappings(struct vcpu *v, mfn_t sl4mfn, int user_only)
  * These functions require a pointer to the shadow entry that will be updated.
  */
 
-/* These functions take a new guest entry, translate it to shadow and write 
+/* These functions take a new guest entry, translate it to shadow and write
  * the shadow entry.
  *
  * They return the same bitmaps as the shadow_set_lXe() functions.
@@ -2240,7 +2240,7 @@ static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
             mfn_t gl1mfn = get_gfn_query_unlocked(v->domain, gfn_x(gl1gfn),
                                                   &p2mt);
             if ( p2m_is_ram(p2mt) )
-                sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow); 
+                sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
             else if ( p2mt != p2m_populate_on_demand )
                 result |= SHADOW_SET_ERROR;
         }
@@ -2275,7 +2275,7 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     gl1mfn = backpointer(mfn_to_page(sl1mfn));
-    if ( mfn_valid(gl1mfn) 
+    if ( mfn_valid(gl1mfn)
          && mfn_is_out_of_sync(gl1mfn) )
     {
         /* Update the OOS snapshot. */
@@ -2295,7 +2295,7 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
 /**************************************************************************/
-/* Special validation function for re-syncing out-of-sync shadows. 
+/* Special validation function for re-syncing out-of-sync shadows.
  * Walks the *shadow* page, and for every entry that it finds,
  * revalidates the guest entry that corresponds to it.
  * N.B. This function is called with the vcpu that unsynced the page,
@@ -2342,10 +2342,10 @@ void sh_resync_l1(struct vcpu *v, mfn_t gl1mfn, mfn_t snpmfn)
     ASSERT(!(rc & SHADOW_SET_FLUSH));
 }
 
-/* Figure out whether it's definitely safe not to sync this l1 table. 
- * That is: if we can tell that it's only used once, and that the 
- * toplevel shadow responsible is not one of ours. 
- * N.B. This function is called with the vcpu that required the resync, 
+/* Figure out whether it's definitely safe not to sync this l1 table.
+ * That is: if we can tell that it's only used once, and that the
+ * toplevel shadow responsible is not one of ours.
+ * N.B. This function is called with the vcpu that required the resync,
  *      *not* the one that originally unsynced the page, but it is
  *      called in the *mode* of the vcpu that unsynced it.  Clear?  Good. */
 int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
@@ -2366,7 +2366,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
     smfn = _mfn(sp->up >> PAGE_SHIFT);
     ASSERT(mfn_valid(smfn));
 
-#if (SHADOW_PAGING_LEVELS == 4) 
+#if (SHADOW_PAGING_LEVELS == 4)
     /* up to l3 */
     sp = mfn_to_page(smfn);
     ASSERT(sh_type_has_up_pointer(v, SH_type_l2_shadow));
@@ -2385,15 +2385,15 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
 #endif
 
     if ( pagetable_get_pfn(v->arch.shadow_table[0]) == mfn_x(smfn)
-#if (SHADOW_PAGING_LEVELS == 3) 
+#if (SHADOW_PAGING_LEVELS == 3)
          || pagetable_get_pfn(v->arch.shadow_table[1]) == mfn_x(smfn)
          || pagetable_get_pfn(v->arch.shadow_table[2]) == mfn_x(smfn)
-         || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn) 
+         || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn)
 #endif
         )
         return 0;
-    
-    /* Only in use in one toplevel shadow, and it's not the one we're 
+
+    /* Only in use in one toplevel shadow, and it's not the one we're
      * running on */
     return 1;
 }
@@ -2401,15 +2401,15 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
 
 
 /**************************************************************************/
-/* Functions which translate and install the shadows of arbitrary guest 
+/* Functions which translate and install the shadows of arbitrary guest
  * entries that we have just seen the guest write. */
 
 
-static inline int 
+static inline int
 sh_map_and_validate(struct vcpu *v, mfn_t gmfn,
-                     void *new_gp, u32 size, u32 sh_type, 
+                     void *new_gp, u32 size, u32 sh_type,
                      u32 (*shadow_index)(mfn_t *smfn, u32 idx),
-                     int (*validate_ge)(struct vcpu *v, void *ge, 
+                     int (*validate_ge)(struct vcpu *v, void *ge,
                                         mfn_t smfn, void *se))
 /* Generic function for mapping and validating. */
 {
@@ -2462,25 +2462,25 @@ sh_map_and_validate_gl4e(struct vcpu *v, mfn_t gl4mfn,
                           void *new_gl4p, u32 size)
 {
 #if GUEST_PAGING_LEVELS >= 4
-    return sh_map_and_validate(v, gl4mfn, new_gl4p, size, 
-                                SH_type_l4_shadow, 
-                                shadow_l4_index, 
+    return sh_map_and_validate(v, gl4mfn, new_gl4p, size,
+                                SH_type_l4_shadow,
+                                shadow_l4_index,
                                 validate_gl4e);
 #else // ! GUEST_PAGING_LEVELS >= 4
     SHADOW_ERROR("called in wrong paging mode!\n");
     BUG();
     return 0;
-#endif 
+#endif
 }
-    
+
 int
 sh_map_and_validate_gl3e(struct vcpu *v, mfn_t gl3mfn,
                           void *new_gl3p, u32 size)
 {
 #if GUEST_PAGING_LEVELS >= 4
-    return sh_map_and_validate(v, gl3mfn, new_gl3p, size, 
-                                SH_type_l3_shadow, 
-                                shadow_l3_index, 
+    return sh_map_and_validate(v, gl3mfn, new_gl3p, size,
+                                SH_type_l3_shadow,
+                                shadow_l3_index,
                                 validate_gl3e);
 #else // ! GUEST_PAGING_LEVELS >= 4
     SHADOW_ERROR("called in wrong paging mode!\n");
@@ -2493,9 +2493,9 @@ int
 sh_map_and_validate_gl2e(struct vcpu *v, mfn_t gl2mfn,
                           void *new_gl2p, u32 size)
 {
-    return sh_map_and_validate(v, gl2mfn, new_gl2p, size, 
-                                SH_type_l2_shadow, 
-                                shadow_l2_index, 
+    return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
+                                SH_type_l2_shadow,
+                                shadow_l2_index,
                                 validate_gl2e);
 }
 
@@ -2504,9 +2504,9 @@ sh_map_and_validate_gl2he(struct vcpu *v, mfn_t gl2mfn,
                            void *new_gl2p, u32 size)
 {
 #if GUEST_PAGING_LEVELS >= 3
-    return sh_map_and_validate(v, gl2mfn, new_gl2p, size, 
-                                SH_type_l2h_shadow, 
-                                shadow_l2_index, 
+    return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
+                                SH_type_l2h_shadow,
+                                shadow_l2_index,
                                 validate_gl2e);
 #else /* Non-PAE guests don't have different kinds of l2 table */
     SHADOW_ERROR("called in wrong paging mode!\n");
@@ -2519,9 +2519,9 @@ int
 sh_map_and_validate_gl1e(struct vcpu *v, mfn_t gl1mfn,
                           void *new_gl1p, u32 size)
 {
-    return sh_map_and_validate(v, gl1mfn, new_gl1p, size, 
-                                SH_type_l1_shadow, 
-                                shadow_l1_index, 
+    return sh_map_and_validate(v, gl1mfn, new_gl1p, size,
+                                SH_type_l1_shadow,
+                                shadow_l1_index,
                                 validate_gl1e);
 }
 
@@ -2572,7 +2572,7 @@ static inline void reset_early_unshadow(struct vcpu *v)
 
 
 /**************************************************************************/
-/* Optimization: Prefetch multiple L1 entries.  This is called after we have 
+/* Optimization: Prefetch multiple L1 entries.  This is called after we have
  * demand-faulted a shadow l1e in the fault handler, to see if it's
  * worth fetching some more.
  */
@@ -2582,7 +2582,7 @@ static inline void reset_early_unshadow(struct vcpu *v)
 /* XXX magic number */
 #define PREFETCH_DISTANCE 32
 
-static void sh_prefetch(struct vcpu *v, walk_t *gw, 
+static void sh_prefetch(struct vcpu *v, walk_t *gw,
                         shadow_l1e_t *ptr_sl1e, mfn_t sl1mfn)
 {
     int i, dist;
@@ -2621,7 +2621,7 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
 #endif /* OOS */
     }
 
-    for ( i = 1; i < dist ; i++ ) 
+    for ( i = 1; i < dist ; i++ )
     {
         /* No point in prefetching if there's already a shadow */
         if ( ptr_sl1e[i].l1 != 0 )
@@ -2634,18 +2634,18 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
             /* Not worth continuing if we hit an entry that will need another
              * fault for A/D-bit propagation anyway */
             gflags = guest_l1e_get_flags(gl1e);
-            if ( (gflags & _PAGE_PRESENT) 
+            if ( (gflags & _PAGE_PRESENT)
                  && (!(gflags & _PAGE_ACCESSED)
                      || ((gflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY))) )
                 break;
-        } 
-        else 
+        }
+        else
         {
             /* Fragmented superpage, unless we've been called wrongly */
             ASSERT(guest_l2e_get_flags(gw->l2e) & _PAGE_PSE);
             /* Increment the l1e's GFN by the right number of guest pages */
             gl1e = guest_l1e_from_gfn(
-                _gfn(gfn_x(guest_l1e_get_gfn(gw->l1e)) + i), 
+                _gfn(gfn_x(guest_l1e_get_gfn(gw->l1e)) + i),
                 guest_l1e_get_flags(gw->l1e));
         }
 
@@ -2715,7 +2715,7 @@ static inline void trace_shadow_fixup(guest_l1e_t gl1e,
         __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
-                                          
+
 static inline void trace_not_shadow_fault(guest_l1e_t gl1e,
                                           guest_va_t va)
 {
@@ -2739,7 +2739,7 @@ static inline void trace_not_shadow_fault(guest_l1e_t gl1e,
         __trace_var(event, 0/*!tsc*/, sizeof(d), &d);
     }
 }
-                                          
+
 static inline void trace_shadow_emulate_other(u32 event,
                                                  guest_va_t va,
                                                  gfn_t gfn)
@@ -2807,8 +2807,8 @@ static inline void trace_shadow_emulate(guest_l1e_t gl1e, unsigned long va)
  * shadow code (and the guest should retry) or 0 if it is not (and the
  * fault should be handled elsewhere or passed to the guest). */
 
-static int sh_page_fault(struct vcpu *v, 
-                          unsigned long va, 
+static int sh_page_fault(struct vcpu *v,
+                          unsigned long va,
                           struct cpu_user_regs *regs)
 {
     struct domain *d = v->domain;
@@ -2848,7 +2848,7 @@ static int sh_page_fault(struct vcpu *v,
      * Then try to emulate early to avoid lock aquisition.
      */
     if ( v->arch.paging.last_write_emul_ok
-         && v->arch.paging.shadow.last_emulated_frame == (va >> PAGE_SHIFT) ) 
+         && v->arch.paging.shadow.last_emulated_frame == (va >> PAGE_SHIFT) )
     {
         /* check whether error code is 3, or else fall back to normal path
          * in case of some validation is required
@@ -2858,7 +2858,7 @@ static int sh_page_fault(struct vcpu *v,
             fast_emul = 1;
             gmfn = _mfn(v->arch.paging.shadow.last_emulated_mfn);
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
             /* Fall back to the slow path if we're trying to emulate
                writes to an out of sync page. */
             if ( mfn_valid(gmfn) && mfn_is_out_of_sync(gmfn) )
@@ -2886,7 +2886,7 @@ static int sh_page_fault(struct vcpu *v,
 #if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
     if ( (regs->error_code & PFEC_reserved_bit) )
     {
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         /* First, need to check that this isn't an out-of-sync
          * shadow l1e.  If it is, we fall back to the slow path, which
          * will sync it up again. */
@@ -2902,7 +2902,7 @@ static int sh_page_fault(struct vcpu *v,
                                   shadow_l2e_get_mfn(sl2e))))
                  || unlikely(mfn_is_out_of_sync(gl1mfn)) )
             {
-                /* Hit the slow path as if there had been no 
+                /* Hit the slow path as if there had been no
                  * shadow entry at all, and let it tidy up */
                 ASSERT(regs->error_code & PFEC_page_present);
                 regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present);
@@ -2910,10 +2910,10 @@ static int sh_page_fault(struct vcpu *v,
             }
         }
 #endif /* SHOPT_OUT_OF_SYNC */
-        /* The only reasons for reserved bits to be set in shadow entries 
+        /* The only reasons for reserved bits to be set in shadow entries
          * are the two "magic" shadow_l1e entries. */
-        if ( likely((__copy_from_user(&sl1e, 
-                                      (sh_linear_l1_table(v) 
+        if ( likely((__copy_from_user(&sl1e,
+                                      (sh_linear_l1_table(v)
                                        + shadow_l1_linear_offset(va)),
                                       sizeof(sl1e)) == 0)
                     && sh_l1e_is_magic(sl1e)) )
@@ -2935,8 +2935,8 @@ static int sh_page_fault(struct vcpu *v,
             {
                 /* Magic MMIO marker: extract gfn for MMIO address */
                 ASSERT(sh_l1e_is_mmio(sl1e));
-                gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e)))) 
-                       << PAGE_SHIFT) 
+                gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
+                       << PAGE_SHIFT)
                     | (va & ~PAGE_MASK);
             }
             perfc_incr(shadow_fault_fast_mmio);
@@ -2949,24 +2949,24 @@ static int sh_page_fault(struct vcpu *v,
         else
         {
             /* This should be exceptionally rare: another vcpu has fixed
-             * the tables between the fault and our reading the l1e. 
+             * the tables between the fault and our reading the l1e.
              * Retry and let the hardware give us the right fault next time. */
             perfc_incr(shadow_fault_fast_fail);
-            SHADOW_PRINTK("fast path false alarm!\n");            
+            SHADOW_PRINTK("fast path false alarm!\n");
             trace_shadow_gen(TRC_SHADOW_FALSE_FAST_PATH, va);
             return EXCRET_fault_fixed;
         }
     }
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
  page_fault_slow_path:
 #endif
 #endif /* SHOPT_FAST_FAULT_PATH */
 
     /* Detect if this page fault happened while we were already in Xen
      * doing a shadow operation.  If that happens, the only thing we can
-     * do is let Xen's normal fault handlers try to fix it.  In any case, 
-     * a diagnostic trace of the fault will be more useful than 
+     * do is let Xen's normal fault handlers try to fix it.  In any case,
+     * a diagnostic trace of the fault will be more useful than
      * a BUG() when we try to take the lock again. */
     if ( unlikely(paging_locked_by_me(d)) )
     {
@@ -2980,7 +2980,7 @@ static int sh_page_fault(struct vcpu *v,
     /* The walk is done in a lock-free style, with some sanity check
      * postponed after grabbing paging lock later. Those delayed checks
      * will make sure no inconsistent mapping being translated into
-     * shadow page table. */ 
+     * shadow page table. */
     version = atomic_read(&d->arch.paging.shadow.gtable_dirty_version);
     rmb();
     rc = sh_walk_guest_tables(v, va, &gw, regs->error_code);
@@ -3001,9 +3001,9 @@ static int sh_page_fault(struct vcpu *v,
         goto propagate;
     }
 
-    /* It's possible that the guest has put pagetables in memory that it has 
+    /* It's possible that the guest has put pagetables in memory that it has
      * already used for some special purpose (ioreq pages, or granted pages).
-     * If that happens we'll have killed the guest already but it's still not 
+     * If that happens we'll have killed the guest already but it's still not
      * safe to propagate entries out of the guest PT so get out now. */
     if ( unlikely(d->is_shutting_down && d->shutdown_code == SHUTDOWN_crash) )
     {
@@ -3019,12 +3019,12 @@ static int sh_page_fault(struct vcpu *v,
     gfn = guest_l1e_get_gfn(gw.l1e);
     gmfn = get_gfn(d, gfn, &p2mt);
 
-    if ( shadow_mode_refcounts(d) && 
+    if ( shadow_mode_refcounts(d) &&
          ((!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) ||
           (!p2m_is_mmio(p2mt) && !mfn_valid(gmfn))) )
     {
         perfc_incr(shadow_fault_bail_bad_gfn);
-        SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n", 
+        SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n",
                       gfn_x(gfn), mfn_x(gmfn));
         reset_early_unshadow(v);
         put_gfn(d, gfn_x(gfn));
@@ -3033,7 +3033,7 @@ static int sh_page_fault(struct vcpu *v,
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
     /* Remember this successful VA->GFN translation for later. */
-    vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), 
+    vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn),
                 regs->error_code | PFEC_page_present);
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
@@ -3053,7 +3053,7 @@ static int sh_page_fault(struct vcpu *v,
     shadow_prealloc(d,
                     SH_type_l1_shadow,
                     GUEST_PAGING_LEVELS < 4 ? 1 : GUEST_PAGING_LEVELS - 1);
-    
+
     rc = gw_remove_write_accesses(v, va, &gw);
 
     /* First bit set: Removed write access to a page. */
@@ -3088,10 +3088,10 @@ static int sh_page_fault(struct vcpu *v,
     shadow_audit_tables(v);
     sh_audit_gw(v, &gw);
 
-    /* Acquire the shadow.  This must happen before we figure out the rights 
+    /* Acquire the shadow.  This must happen before we figure out the rights
      * for the shadow entry, since we might promote a page here. */
     ptr_sl1e = shadow_get_and_create_l1e(v, &gw, &sl1mfn, ft);
-    if ( unlikely(ptr_sl1e == NULL) ) 
+    if ( unlikely(ptr_sl1e == NULL) )
     {
         /* Couldn't get the sl1e!  Since we know the guest entries
          * are OK, this can only have been caused by a failed
@@ -3146,15 +3146,15 @@ static int sh_page_fault(struct vcpu *v,
     r = shadow_set_l1e(v, ptr_sl1e, sl1e, p2mt, sl1mfn);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
-    if ( mfn_valid(gw.l1mfn) 
+    if ( mfn_valid(gw.l1mfn)
          && mfn_is_out_of_sync(gw.l1mfn) )
     {
         /* Update the OOS snapshot. */
         mfn_t snpmfn = oos_snapshot_lookup(v, gw.l1mfn);
         guest_l1e_t *snp;
-        
+
         ASSERT(mfn_valid(snpmfn));
-        
+
         snp = sh_map_domain_page(snpmfn);
         snp[guest_l1_table_offset(va)] = gw.l1e;
         sh_unmap_domain_page(snp);
@@ -3168,7 +3168,7 @@ static int sh_page_fault(struct vcpu *v,
 
     /* Need to emulate accesses to page tables */
     if ( sh_mfn_is_a_page_table(gmfn)
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
          /* Unless they've been allowed to go out of sync with their
             shadows and we don't need to unshadow it. */
          && !(mfn_is_out_of_sync(gmfn)
@@ -3202,10 +3202,10 @@ static int sh_page_fault(struct vcpu *v,
     /* In HVM guests, we force CR0.WP always to be set, so that the
      * pagetables are always write-protected.  If the guest thinks
      * CR0.WP is clear, we must emulate faulting supervisor writes to
-     * allow the guest to write through read-only PTEs.  Emulate if the 
+     * allow the guest to write through read-only PTEs.  Emulate if the
      * fault was a non-user write to a present page.  */
-    if ( is_hvm_domain(d) 
-         && unlikely(!hvm_wp_enabled(v)) 
+    if ( is_hvm_domain(d)
+         && unlikely(!hvm_wp_enabled(v))
          && regs->error_code == (PFEC_write_access|PFEC_page_present)
          && mfn_valid(gmfn) )
     {
@@ -3237,7 +3237,7 @@ static int sh_page_fault(struct vcpu *v,
      */
     if ( (regs->error_code & PFEC_user_mode) )
     {
-        SHADOW_PRINTK("user-mode fault to PT, unshadowing mfn %#lx\n", 
+        SHADOW_PRINTK("user-mode fault to PT, unshadowing mfn %#lx\n",
                       mfn_x(gmfn));
         perfc_incr(shadow_fault_emulate_failed);
         sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
@@ -3318,7 +3318,7 @@ static int sh_page_fault(struct vcpu *v,
             }
 #endif
             gdprintk(XENLOG_DEBUG, "write to pagetable during event "
-                     "injection: cr2=%#lx, mfn=%#lx\n", 
+                     "injection: cr2=%#lx, mfn=%#lx\n",
                      va, mfn_x(gmfn));
             sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
             trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ,
@@ -3327,7 +3327,7 @@ static int sh_page_fault(struct vcpu *v,
         }
     }
 
-    SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n", 
+    SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n",
                   (unsigned long)regs->eip, (unsigned long)regs->esp);
 
     emul_ops = shadow_init_emulation(&emul_ctxt, regs);
@@ -3349,10 +3349,10 @@ static int sh_page_fault(struct vcpu *v,
             v->arch.paging.last_write_emul_ok = 0;
         }
 #endif
-        SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n", 
+        SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n",
                        mfn_x(gmfn));
-        /* If this is actually a page table, then we have a bug, and need 
-         * to support more operations in the emulator.  More likely, 
+        /* If this is actually a page table, then we have a bug, and need
+         * to support more operations in the emulator.  More likely,
          * though, this is a hint that this page should not be shadowed. */
         shadow_remove_all_shadows(v, gmfn);
 
@@ -3387,7 +3387,7 @@ static int sh_page_fault(struct vcpu *v,
     if ( r == X86EMUL_OKAY ) {
         int i, emulation_count=0;
         this_cpu(trace_emulate_initial_va) = va;
-        /* Emulate up to four extra instructions in the hope of catching 
+        /* Emulate up to four extra instructions in the hope of catching
          * the "second half" of a 64-bit pagetable write. */
         for ( i = 0 ; i < 4 ; i++ )
         {
@@ -3395,7 +3395,7 @@ static int sh_page_fault(struct vcpu *v,
             v->arch.paging.last_write_was_pt = 0;
             r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
             if ( r == X86EMUL_OKAY )
-            { 
+            {
                 emulation_count++;
                 if ( v->arch.paging.last_write_was_pt )
                 {
@@ -3403,7 +3403,7 @@ static int sh_page_fault(struct vcpu *v,
                     TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATION_2ND_PT_WRITTEN);
                     break; /* Don't emulate past the other half of the write */
                 }
-                else 
+                else
                     perfc_incr(shadow_em_ex_non_pt);
             }
             else
@@ -3459,7 +3459,7 @@ sh_invlpg(struct vcpu *v, unsigned long va)
 {
     mfn_t sl1mfn;
     shadow_l2e_t sl2e;
-    
+
     perfc_incr(shadow_invlpg);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
@@ -3472,7 +3472,7 @@ sh_invlpg(struct vcpu *v, unsigned long va)
 #endif
 
     /* First check that we can safely read the shadow l2e.  SMP/PAE linux can
-     * run as high as 6% of invlpg calls where we haven't shadowed the l2 
+     * run as high as 6% of invlpg calls where we haven't shadowed the l2
      * yet. */
 #if SHADOW_PAGING_LEVELS == 4
     {
@@ -3484,7 +3484,7 @@ sh_invlpg(struct vcpu *v, unsigned long va)
         /* This must still be a copy-from-user because we don't have the
          * paging lock, and the higher-level shadows might disappear
          * under our feet. */
-        if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v) 
+        if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v)
                                       + shadow_l3_linear_offset(va)),
                               sizeof (sl3e)) != 0 )
         {
@@ -3503,7 +3503,7 @@ sh_invlpg(struct vcpu *v, unsigned long va)
 
     /* This must still be a copy-from-user because we don't have the shadow
      * lock, and the higher-level shadows might disappear under our feet. */
-    if ( __copy_from_user(&sl2e, 
+    if ( __copy_from_user(&sl2e,
                           sh_linear_l2_table(v) + shadow_l2_linear_offset(va),
                           sizeof (sl2e)) != 0 )
     {
@@ -3529,12 +3529,12 @@ sh_invlpg(struct vcpu *v, unsigned long va)
         return 0;
     }
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Check to see if the SL1 is out of sync. */
     {
         mfn_t gl1mfn = backpointer(mfn_to_page(sl1mfn));
         struct page_info *pg = mfn_to_page(gl1mfn);
-        if ( mfn_valid(gl1mfn) 
+        if ( mfn_valid(gl1mfn)
              && page_is_out_of_sync(pg) )
         {
             /* The test above may give false positives, since we don't
@@ -3545,7 +3545,7 @@ sh_invlpg(struct vcpu *v, unsigned long va)
              * have the paging lock last time we checked, and the
              * higher-level shadows might have disappeared under our
              * feet. */
-            if ( __copy_from_user(&sl2e, 
+            if ( __copy_from_user(&sl2e,
                                   sh_linear_l2_table(v)
                                   + shadow_l2_linear_offset(va),
                                   sizeof (sl2e)) != 0 )
@@ -3564,7 +3564,7 @@ sh_invlpg(struct vcpu *v, unsigned long va)
             sl1mfn = shadow_l2e_get_mfn(sl2e);
             gl1mfn = backpointer(mfn_to_page(sl1mfn));
             pg = mfn_to_page(gl1mfn);
-            
+
             if ( likely(sh_mfn_is_a_page_table(gl1mfn)
                         && page_is_out_of_sync(pg) ) )
             {
@@ -3598,7 +3598,7 @@ sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
     /* Check the vTLB cache first */
     unsigned long vtlb_gfn = vtlb_lookup(v, va, pfec[0]);
-    if ( VALID_GFN(vtlb_gfn) ) 
+    if ( VALID_GFN(vtlb_gfn) )
         return vtlb_gfn;
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
@@ -3637,7 +3637,7 @@ sh_update_linear_entries(struct vcpu *v)
      * is subtler.  Normal linear mappings are made by having an entry
      * in the top-level table that points to itself (shadow linear) or
      * to the guest top-level table (guest linear).  For PAE, to set up
-     * a linear map requires us to copy the four top-level entries into 
+     * a linear map requires us to copy the four top-level entries into
      * level-2 entries.  That means that every time we change a PAE l3e,
      * we need to reflect the change into the copy.
      *
@@ -3647,41 +3647,41 @@ sh_update_linear_entries(struct vcpu *v)
      * For HVM guests, the linear pagetables are installed in the monitor
      * tables (since we can't put them in the shadow).  Shadow linear
      * pagetables, which map the shadows, are at SH_LINEAR_PT_VIRT_START,
-     * and we use the linear pagetable slot at LINEAR_PT_VIRT_START for 
-     * a linear pagetable of the monitor tables themselves.  We have 
+     * and we use the linear pagetable slot at LINEAR_PT_VIRT_START for
+     * a linear pagetable of the monitor tables themselves.  We have
      * the same issue of having to re-copy PAE l3 entries whevever we use
-     * PAE shadows. 
+     * PAE shadows.
      *
-     * Because HVM guests run on the same monitor tables regardless of the 
-     * shadow tables in use, the linear mapping of the shadow tables has to 
-     * be updated every time v->arch.shadow_table changes. 
+     * Because HVM guests run on the same monitor tables regardless of the
+     * shadow tables in use, the linear mapping of the shadow tables has to
+     * be updated every time v->arch.shadow_table changes.
      */
 
     /* Don't try to update the monitor table if it doesn't exist */
-    if ( shadow_mode_external(d) 
-         && pagetable_get_pfn(v->arch.monitor_table) == 0 ) 
+    if ( shadow_mode_external(d)
+         && pagetable_get_pfn(v->arch.monitor_table) == 0 )
         return;
 
 #if SHADOW_PAGING_LEVELS == 4
-    
+
     /* For PV, one l4e points at the guest l4, one points at the shadow
-     * l4.  No maintenance required. 
+     * l4.  No maintenance required.
      * For HVM, just need to update the l4e that points to the shadow l4. */
 
     if ( shadow_mode_external(d) )
     {
         /* Use the linear map if we can; otherwise make a new mapping */
-        if ( v == current ) 
+        if ( v == current )
         {
-            __linear_l4_table[l4_linear_offset(SH_LINEAR_PT_VIRT_START)] = 
+            __linear_l4_table[l4_linear_offset(SH_LINEAR_PT_VIRT_START)] =
                 l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
                              __PAGE_HYPERVISOR);
-        } 
+        }
         else
-        { 
+        {
             l4_pgentry_t *ml4e;
             ml4e = sh_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
-            ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] = 
+            ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
                 l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
                              __PAGE_HYPERVISOR);
             sh_unmap_domain_page(ml4e);
@@ -3712,8 +3712,8 @@ sh_update_linear_entries(struct vcpu *v)
         if ( v == current )
             ml2e = __linear_l2_table
                 + l2_linear_offset(SH_LINEAR_PT_VIRT_START);
-        else 
-        {   
+        else
+        {
             mfn_t l3mfn, l2mfn;
             l4_pgentry_t *ml4e;
             l3_pgentry_t *ml3e;
@@ -3736,14 +3736,14 @@ sh_update_linear_entries(struct vcpu *v)
 
         for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
         {
-            ml2e[i] = 
-                (shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT) 
+            ml2e[i] =
+                (shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT)
                 ? l2e_from_pfn(mfn_x(shadow_l3e_get_mfn(sl3e[i])),
-                               __PAGE_HYPERVISOR) 
+                               __PAGE_HYPERVISOR)
                 : l2e_empty();
         }
 
-        if ( v != current ) 
+        if ( v != current )
             sh_unmap_domain_page(ml2e);
     }
     else
@@ -3758,11 +3758,11 @@ sh_update_linear_entries(struct vcpu *v)
         /*
          * Having modified the linear pagetable mapping, flush local host TLBs.
          * This was not needed when vmenter/vmexit always had the side effect
-         * of flushing host TLBs but, with ASIDs, it is possible to finish 
-         * this CR3 update, vmenter the guest, vmexit due to a page fault, 
-         * without an intervening host TLB flush. Then the page fault code 
-         * could use the linear pagetable to read a top-level shadow page 
-         * table entry. But, without this change, it would fetch the wrong 
+         * of flushing host TLBs but, with ASIDs, it is possible to finish
+         * this CR3 update, vmenter the guest, vmexit due to a page fault,
+         * without an intervening host TLB flush. Then the page fault code
+         * could use the linear pagetable to read a top-level shadow page
+         * table entry. But, without this change, it would fetch the wrong
          * value due to a stale TLB.
          */
         flush_tlb_local();
@@ -3815,16 +3815,16 @@ sh_detach_old_tables(struct vcpu *v)
 
 /* Set up the top-level shadow and install it in slot 'slot' of shadow_table */
 static void
-sh_set_toplevel_shadow(struct vcpu *v, 
+sh_set_toplevel_shadow(struct vcpu *v,
                        int slot,
-                       mfn_t gmfn, 
-                       unsigned int root_type) 
+                       mfn_t gmfn,
+                       unsigned int root_type)
 {
     mfn_t smfn;
     pagetable_t old_entry, new_entry;
 
     struct domain *d = v->domain;
-    
+
     /* Remember the old contents of this slot */
     old_entry = v->arch.shadow_table[slot];
 
@@ -3845,7 +3845,7 @@ sh_set_toplevel_shadow(struct vcpu *v,
         smfn = sh_make_shadow(v, gmfn, root_type);
     }
     ASSERT(mfn_valid(smfn));
-    
+
     /* Pin the shadow and put it (back) on the list of pinned shadows */
     if ( sh_pin(v, smfn) == 0 )
     {
@@ -3892,10 +3892,10 @@ sh_update_cr3(struct vcpu *v, int do_locking)
  * Paravirtual guests should set v->arch.guest_table (and guest_table_user,
  * if appropriate).
  * HVM guests should also make sure hvm_get_guest_cntl_reg(v, 3) works;
- * this function will call hvm_update_guest_cr(v, 3) to tell them where the 
+ * this function will call hvm_update_guest_cr(v, 3) to tell them where the
  * shadow tables are.
- * If do_locking != 0, assume we are being called from outside the 
- * shadow code, and must take and release the paging lock; otherwise 
+ * If do_locking != 0, assume we are being called from outside the
+ * shadow code, and must take and release the paging lock; otherwise
  * that is the caller's responsibility.
  */
 {
@@ -3929,22 +3929,22 @@ sh_update_cr3(struct vcpu *v, int do_locking)
     ////
     //// vcpu->arch.guest_table is already set
     ////
-    
-#ifndef NDEBUG 
+
+#ifndef NDEBUG
     /* Double-check that the HVM code has sent us a sane guest_table */
     if ( is_hvm_domain(d) )
     {
         ASSERT(shadow_mode_external(d));
         if ( hvm_paging_enabled(v) )
             ASSERT(pagetable_get_pfn(v->arch.guest_table));
-        else 
+        else
             ASSERT(v->arch.guest_table.pfn
                    == d->arch.paging.shadow.unpaged_pagetable.pfn);
     }
 #endif
 
     SHADOW_PRINTK("d=%u v=%u guest_table=%05lx\n",
-                   d->domain_id, v->vcpu_id, 
+                   d->domain_id, v->vcpu_id,
                    (unsigned long)pagetable_get_pfn(v->arch.guest_table));
 
 #if GUEST_PAGING_LEVELS == 4
@@ -3975,18 +3975,18 @@ sh_update_cr3(struct vcpu *v, int do_locking)
       * table.  We cache the current state of that table and shadow that,
       * until the next CR3 write makes us refresh our cache. */
      ASSERT(v->arch.paging.shadow.guest_vtable == NULL);
- 
-     if ( shadow_mode_external(d) ) 
+
+     if ( shadow_mode_external(d) )
          /* Find where in the page the l3 table is */
          guest_idx = guest_index((void *)v->arch.hvm_vcpu.guest_cr[3]);
      else
-         /* PV guest: l3 is at the start of a page */ 
-         guest_idx = 0; 
+         /* PV guest: l3 is at the start of a page */
+         guest_idx = 0;
 
      // Ignore the low 2 bits of guest_idx -- they are really just
      // cache control.
      guest_idx &= ~3;
-     
+
      gl3e = ((guest_l3e_t *)sh_map_domain_page(gmfn)) + guest_idx;
      for ( i = 0; i < 4 ; i++ )
          v->arch.paging.shadow.gl3e[i] = gl3e[i];
@@ -4013,14 +4013,14 @@ sh_update_cr3(struct vcpu *v, int do_locking)
     ////
 
     /* We revoke write access to the new guest toplevel page(s) before we
-     * replace the old shadow pagetable(s), so that we can safely use the 
+     * replace the old shadow pagetable(s), so that we can safely use the
      * (old) shadow linear maps in the writeable mapping heuristics. */
 #if GUEST_PAGING_LEVELS == 2
     if ( sh_remove_write_access(v, gmfn, 2, 0) != 0 )
         flush_tlb_mask(d->domain_dirty_cpumask);
     sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
 #elif GUEST_PAGING_LEVELS == 3
-    /* PAE guests have four shadow_table entries, based on the 
+    /* PAE guests have four shadow_table entries, based on the
      * current values of the guest's four l3es. */
     {
         int flush = 0;
@@ -4039,24 +4039,24 @@ sh_update_cr3(struct vcpu *v, int do_locking)
                     flush |= sh_remove_write_access(v, gl2mfn, 2, 0);
             }
         }
-        if ( flush ) 
+        if ( flush )
             flush_tlb_mask(d->domain_dirty_cpumask);
         /* Now install the new shadows. */
-        for ( i = 0; i < 4; i++ ) 
+        for ( i = 0; i < 4; i++ )
         {
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
                 gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
                 if ( p2m_is_ram(p2mt) )
-                    sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3) 
-                                           ? SH_type_l2h_shadow 
+                    sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
+                                           ? SH_type_l2h_shadow
                                            : SH_type_l2_shadow);
                 else
-                    sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0); 
+                    sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
             }
             else
-                sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0); 
+                sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
         }
     }
 #elif GUEST_PAGING_LEVELS == 4
@@ -4064,11 +4064,11 @@ sh_update_cr3(struct vcpu *v, int do_locking)
         flush_tlb_mask(d->domain_dirty_cpumask);
     sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
 #else
-#error This should never happen 
+#error This should never happen
 #endif
 
 
-    /// 
+    ///
     /// v->arch.paging.shadow.l3table
     ///
 #if SHADOW_PAGING_LEVELS == 3
@@ -4085,8 +4085,8 @@ sh_update_cr3(struct vcpu *v, int do_locking)
                 /* 3-on-3: make a PAE l3 that points at the four l2 pages */
                 smfn = pagetable_get_mfn(v->arch.shadow_table[i]);
 #endif
-                v->arch.paging.shadow.l3table[i] = 
-                    (mfn_x(smfn) == 0) 
+                v->arch.paging.shadow.l3table[i] =
+                    (mfn_x(smfn) == 0)
                     ? shadow_l3e_empty()
                     : shadow_l3e_from_mfn(smfn, _PAGE_PRESENT);
             }
@@ -4165,7 +4165,7 @@ sh_update_cr3(struct vcpu *v, int do_locking)
 /* Functions to revoke guest rights */
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
-int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn, 
+int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
                                  mfn_t smfn, unsigned long off)
 {
     int r;
@@ -4236,7 +4236,7 @@ static int sh_guess_wrmap(struct vcpu *v, unsigned long vaddr, mfn_t gmfn)
     if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
         return 0;
 #else /* SHADOW_PAGING_LEVELS == 3 */
-    sl3p = ((shadow_l3e_t *) v->arch.paging.shadow.l3table) 
+    sl3p = ((shadow_l3e_t *) v->arch.paging.shadow.l3table)
         + shadow_l3_linear_offset(vaddr);
     if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
         return 0;
@@ -4272,20 +4272,20 @@ int sh_rm_write_access_from_l1(struct vcpu *v, mfn_t sl1mfn,
     shadow_l1e_t *sl1e;
     int done = 0;
     int flags;
-#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC 
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
     mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */
 #endif
-    
-    SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done, 
+
+    SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done,
     {
         flags = shadow_l1e_get_flags(*sl1e);
-        if ( (flags & _PAGE_PRESENT) 
-             && (flags & _PAGE_RW) 
+        if ( (flags & _PAGE_PRESENT)
+             && (flags & _PAGE_RW)
              && (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(readonly_mfn)) )
         {
             shadow_l1e_t ro_sl1e = shadow_l1e_remove_flags(*sl1e, _PAGE_RW);
             (void) shadow_set_l1e(v, sl1e, ro_sl1e, p2m_ram_rw, sl1mfn);
-#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC 
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
             /* Remember the last shadow that we shot a writeable mapping in */
             v->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn);
 #endif
@@ -4305,11 +4305,11 @@ int sh_rm_mappings_from_l1(struct vcpu *v, mfn_t sl1mfn, mfn_t target_mfn)
     shadow_l1e_t *sl1e;
     int done = 0;
     int flags;
-    
-    SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done, 
+
+    SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done,
     {
         flags = shadow_l1e_get_flags(*sl1e);
-        if ( (flags & _PAGE_PRESENT) 
+        if ( (flags & _PAGE_PRESENT)
              && (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(target_mfn)) )
         {
             (void) shadow_set_l1e(v, sl1e, shadow_l1e_empty(),
@@ -4357,11 +4357,11 @@ int sh_remove_l1_shadow(struct vcpu *v, mfn_t sl2mfn, mfn_t sl1mfn)
     shadow_l2e_t *sl2e;
     int done = 0;
     int flags;
-    
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, v->domain, 
+
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, v->domain,
     {
         flags = shadow_l2e_get_flags(*sl2e);
-        if ( (flags & _PAGE_PRESENT) 
+        if ( (flags & _PAGE_PRESENT)
              && (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
         {
             (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
@@ -4380,11 +4380,11 @@ int sh_remove_l2_shadow(struct vcpu *v, mfn_t sl3mfn, mfn_t sl2mfn)
     shadow_l3e_t *sl3e;
     int done = 0;
     int flags;
-    
-    SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, done, 
+
+    SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, done,
     {
         flags = shadow_l3e_get_flags(*sl3e);
-        if ( (flags & _PAGE_PRESENT) 
+        if ( (flags & _PAGE_PRESENT)
              && (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
         {
             (void) shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
@@ -4402,11 +4402,11 @@ int sh_remove_l3_shadow(struct vcpu *v, mfn_t sl4mfn, mfn_t sl3mfn)
     shadow_l4e_t *sl4e;
     int done = 0;
     int flags;
-    
+
     SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, v->domain,
     {
         flags = shadow_l4e_get_flags(*sl4e);
-        if ( (flags & _PAGE_PRESENT) 
+        if ( (flags & _PAGE_PRESENT)
              && (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
         {
             (void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
@@ -4417,7 +4417,7 @@ int sh_remove_l3_shadow(struct vcpu *v, mfn_t sl4mfn, mfn_t sl3mfn)
     });
     return done;
 }
-#endif /* 64bit guest */ 
+#endif /* 64bit guest */
 
 /**************************************************************************/
 /* Function for the guest to inform us that a process is being torn
@@ -4517,7 +4517,7 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
 #else
     smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l4_64_shadow);
 #endif
-    
+
     if ( mfn_valid(smfn) )
     {
         mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
@@ -4557,7 +4557,7 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v,
 
     /* Translate the VA to a GFN */
     gfn = sh_gva_to_gfn(v, NULL, vaddr, &pfec);
-    if ( gfn == INVALID_GFN ) 
+    if ( gfn == INVALID_GFN )
     {
         if ( is_hvm_vcpu(v) )
             hvm_inject_page_fault(pfec, vaddr);
@@ -4596,7 +4596,7 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v,
     return mfn;
 }
 
-/* Check that the user is allowed to perform this write. 
+/* Check that the user is allowed to perform this write.
  * Returns a mapped pointer to write to, or NULL for error. */
 #define MAPPING_UNHANDLEABLE ((void *)(unsigned long)X86EMUL_UNHANDLEABLE)
 #define MAPPING_EXCEPTION    ((void *)(unsigned long)X86EMUL_EXCEPTION)
@@ -4610,7 +4610,7 @@ static void *emulate_map_dest(struct vcpu *v,
     void *map = NULL;
 
     sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
-    if ( !mfn_valid(sh_ctxt->mfn1) ) 
+    if ( !mfn_valid(sh_ctxt->mfn1) )
         return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ?
                 MAPPING_EXCEPTION :
                 (mfn_x(sh_ctxt->mfn1) == READONLY_GFN) ?
@@ -4625,7 +4625,7 @@ static void *emulate_map_dest(struct vcpu *v,
         return MAPPING_UNHANDLEABLE;
     }
 #endif
-                
+
     /* Unaligned writes mean probably this isn't a pagetable */
     if ( vaddr & (bytes - 1) )
         sh_remove_shadows(v, sh_ctxt->mfn1, 0, 0 /* Slow, can fail */ );
@@ -4636,11 +4636,11 @@ static void *emulate_map_dest(struct vcpu *v,
         sh_ctxt->mfn2 = _mfn(INVALID_MFN);
         map = sh_map_domain_page(sh_ctxt->mfn1) + (vaddr & ~PAGE_MASK);
     }
-    else 
+    else
     {
         unsigned long mfns[2];
 
-        /* Cross-page emulated writes are only supported for HVM guests; 
+        /* Cross-page emulated writes are only supported for HVM guests;
          * PV guests ought to know better */
         if ( !is_hvm_vcpu(v) )
             return MAPPING_UNHANDLEABLE;
@@ -4648,7 +4648,7 @@ static void *emulate_map_dest(struct vcpu *v,
         /* This write crosses a page boundary.  Translate the second page */
         sh_ctxt->mfn2 = emulate_gva_to_mfn(v, (vaddr + bytes - 1) & PAGE_MASK,
                                            sh_ctxt);
-        if ( !mfn_valid(sh_ctxt->mfn2) ) 
+        if ( !mfn_valid(sh_ctxt->mfn2) )
             return ((mfn_x(sh_ctxt->mfn2) == BAD_GVA_TO_GFN) ?
                     MAPPING_EXCEPTION :
                     (mfn_x(sh_ctxt->mfn2) == READONLY_GFN) ?
@@ -4656,7 +4656,7 @@ static void *emulate_map_dest(struct vcpu *v,
 
         /* Cross-page writes mean probably not a pagetable */
         sh_remove_shadows(v, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
-        
+
         mfns[0] = mfn_x(sh_ctxt->mfn1);
         mfns[1] = mfn_x(sh_ctxt->mfn2);
         map = vmap(mfns, 2);
@@ -4690,10 +4690,10 @@ static void emulate_unmap_dest(struct vcpu *v,
     {
         if ( ((unsigned long) addr & ((sizeof (guest_intpte_t)) - 1)) == 0 )
             check_for_early_unshadow(v, sh_ctxt->mfn1);
-        /* Don't reset the heuristic if we're writing zeros at non-aligned 
+        /* Don't reset the heuristic if we're writing zeros at non-aligned
          * addresses, otherwise it doesn't catch REP MOVSD on PAE guests */
     }
-    else 
+    else
         reset_early_unshadow(v);
 
     /* We can avoid re-verifying the page contents after the write if:
@@ -4717,11 +4717,11 @@ static void emulate_unmap_dest(struct vcpu *v,
               && bytes <= 4)) )
     {
         /* Writes with this alignment constraint can't possibly cross pages */
-        ASSERT(!mfn_valid(sh_ctxt->mfn2)); 
+        ASSERT(!mfn_valid(sh_ctxt->mfn2));
     }
-    else 
+    else
 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY */
-    {        
+    {
         if ( unlikely(mfn_valid(sh_ctxt->mfn2)) )
         {
             /* Validate as two writes, one to each page */
@@ -4742,7 +4742,7 @@ static void emulate_unmap_dest(struct vcpu *v,
         paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn2));
         vunmap((void *)((unsigned long)addr & PAGE_MASK));
     }
-    else 
+    else
         sh_unmap_domain_page(addr);
 
     atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
@@ -4788,7 +4788,7 @@ sh_x86_emulate_write(struct vcpu *v, unsigned long vaddr, void *src,
 }
 
 static int
-sh_x86_emulate_cmpxchg(struct vcpu *v, unsigned long vaddr, 
+sh_x86_emulate_cmpxchg(struct vcpu *v, unsigned long vaddr,
                         unsigned long old, unsigned long new,
                         unsigned int bytes, struct sh_emulate_ctxt *sh_ctxt)
 {
@@ -4816,7 +4816,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, unsigned long vaddr,
         prev = ~old;
     }
 
-    if ( prev != old ) 
+    if ( prev != old )
         rv = X86EMUL_CMPXCHG_FAILED;
 
     SHADOW_DEBUG(EMULATE, "va %#lx was %#lx expected %#lx"
@@ -4865,28 +4865,28 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, unsigned long vaddr,
 } while (0)
 
 static char * sh_audit_flags(struct vcpu *v, int level,
-                              int gflags, int sflags) 
+                              int gflags, int sflags)
 /* Common code for auditing flag bits */
 {
     if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_PRESENT) )
         return "shadow is present but guest is not present";
-    if ( (sflags & _PAGE_GLOBAL) && !is_hvm_vcpu(v) ) 
+    if ( (sflags & _PAGE_GLOBAL) && !is_hvm_vcpu(v) )
         return "global bit set in PV shadow";
     if ( level == 2 && (sflags & _PAGE_PSE) )
         return "PS bit set in shadow";
 #if SHADOW_PAGING_LEVELS == 3
     if ( level == 3 ) return NULL; /* All the other bits are blank in PAEl3 */
 #endif
-    if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_ACCESSED) ) 
+    if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_ACCESSED) )
         return "accessed bit not propagated";
     if ( (level == 1 || (level == 2 && (gflags & _PAGE_PSE)))
-         && ((sflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY)) ) 
+         && ((sflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY)) )
         return "dirty bit not propagated";
-    if ( (sflags & _PAGE_USER) != (gflags & _PAGE_USER) ) 
+    if ( (sflags & _PAGE_USER) != (gflags & _PAGE_USER) )
         return "user/supervisor bit does not match";
-    if ( (sflags & _PAGE_NX_BIT) != (gflags & _PAGE_NX_BIT) ) 
+    if ( (sflags & _PAGE_NX_BIT) != (gflags & _PAGE_NX_BIT) )
         return "NX bit does not match";
-    if ( (sflags & _PAGE_RW) && !(gflags & _PAGE_RW) ) 
+    if ( (sflags & _PAGE_RW) && !(gflags & _PAGE_RW) )
         return "shadow grants write access but guest does not";
     return NULL;
 }
@@ -4900,7 +4900,7 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
     p2m_type_t p2mt;
     char *s;
     int done = 0;
-    
+
     /* Follow the backpointer */
     ASSERT(mfn_to_page(sl1mfn)->u.sh.head);
     gl1mfn = backpointer(mfn_to_page(sl1mfn));
@@ -4917,32 +4917,32 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
     gl1e = gp = sh_map_domain_page(gl1mfn);
     SHADOW_FOREACH_L1E(sl1mfn, sl1e, &gl1e, done, {
 
-        if ( sh_l1e_is_magic(*sl1e) ) 
+        if ( sh_l1e_is_magic(*sl1e) )
         {
 #if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
             if ( sh_l1e_is_gnp(*sl1e) )
             {
                 if ( guest_l1e_get_flags(*gl1e) & _PAGE_PRESENT )
                     AUDIT_FAIL(1, "shadow is GNP magic but guest is present");
-            } 
-            else 
+            }
+            else
             {
                 ASSERT(sh_l1e_is_mmio(*sl1e));
                 gfn = sh_l1e_mmio_get_gfn(*sl1e);
                 if ( gfn_x(gfn) != gfn_x(guest_l1e_get_gfn(*gl1e)) )
-                    AUDIT_FAIL(1, "shadow MMIO gfn is %" SH_PRI_gfn 
+                    AUDIT_FAIL(1, "shadow MMIO gfn is %" SH_PRI_gfn
                                " but guest gfn is %" SH_PRI_gfn,
                                gfn_x(gfn),
                                gfn_x(guest_l1e_get_gfn(*gl1e)));
             }
 #endif
         }
-        else 
+        else
         {
             s = sh_audit_flags(v, 1, guest_l1e_get_flags(*gl1e),
                                shadow_l1e_get_flags(*sl1e));
             if ( s ) AUDIT_FAIL(1, "%s", s);
-            
+
             if ( SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_MFNS )
             {
                 gfn = guest_l1e_get_gfn(*gl1e);
@@ -4972,12 +4972,12 @@ int sh_audit_fl1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
     SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done, {
         f = shadow_l1e_get_flags(*sl1e);
         f &= ~(_PAGE_AVAIL0|_PAGE_AVAIL1|_PAGE_AVAIL2);
-        if ( !(f == 0 
+        if ( !(f == 0
                || f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
-                        _PAGE_ACCESSED) 
+                        _PAGE_ACCESSED)
                || f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_ACCESSED)
                || f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
-                        _PAGE_ACCESSED|_PAGE_DIRTY) 
+                        _PAGE_ACCESSED|_PAGE_DIRTY)
                || f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
                || sh_l1e_is_magic(*sl1e)) )
             AUDIT_FAIL(1, "fl1e has bad flags");
@@ -5016,16 +5016,16 @@ int sh_audit_l2_table(struct vcpu *v, mfn_t sl2mfn, mfn_t x)
         {
             gfn = guest_l2e_get_gfn(*gl2e);
             mfn = shadow_l2e_get_mfn(*sl2e);
-            gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)  
+            gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)
                 ? get_fl1_shadow_status(v, gfn)
-                : get_shadow_status(v, 
-                    get_gfn_query_unlocked(v->domain, gfn_x(gfn), 
+                : get_shadow_status(v,
+                    get_gfn_query_unlocked(v->domain, gfn_x(gfn),
                                         &p2mt), SH_type_l1_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
                 AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
                            " (--> %" PRI_mfn ")"
                            " --> %" PRI_mfn " != mfn %" PRI_mfn,
-                           gfn_x(gfn), 
+                           gfn_x(gfn),
                            (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
                            : mfn_x(get_gfn_query_unlocked(v->domain,
                                    gfn_x(gfn), &p2mt)), mfn_x(gmfn), mfn_x(mfn));
@@ -5050,7 +5050,7 @@ int sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x)
     ASSERT(mfn_to_page(sl3mfn)->u.sh.head);
     gl3mfn = backpointer(mfn_to_page(sl3mfn));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Only L1's may be out of sync. */
     if ( page_is_out_of_sync(mfn_to_page(gl3mfn)) )
         AUDIT_FAIL_MIN(3, "gmfn %lx is out of sync", mfn_x(gl3mfn));
@@ -5099,7 +5099,7 @@ int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x)
     ASSERT(mfn_to_page(sl4mfn)->u.sh.head);
     gl4mfn = backpointer(mfn_to_page(sl4mfn));
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Only L1's may be out of sync. */
     if ( page_is_out_of_sync(mfn_to_page(gl4mfn)) )
         AUDIT_FAIL_MIN(4, "gmfn %lx is out of sync", mfn_x(gl4mfn));
@@ -5117,7 +5117,7 @@ int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x)
             gfn = guest_l4e_get_gfn(*gl4e);
             mfn = shadow_l4e_get_mfn(*sl4e);
             gmfn = get_shadow_status(v, get_gfn_query_unlocked(
-                                     v->domain, gfn_x(gfn), &p2mt), 
+                                     v->domain, gfn_x(gfn), &p2mt),
                                      SH_type_l3_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
                 AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
@@ -5139,7 +5139,7 @@ int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x)
 /* Entry points into this mode of the shadow code.
  * This will all be mangled by the preprocessor to uniquify everything. */
 const struct paging_mode sh_paging_mode = {
-    .page_fault                    = sh_page_fault, 
+    .page_fault                    = sh_page_fault,
     .invlpg                        = sh_invlpg,
     .gva_to_gfn                    = sh_gva_to_gfn,
     .update_cr3                    = sh_update_cr3,
@@ -5168,5 +5168,5 @@ const struct paging_mode sh_paging_mode = {
  * c-file-style: "BSD"
  * c-basic-offset: 4
  * indent-tabs-mode: nil
- * End: 
+ * End:
  */
diff --git a/xen/arch/x86/mm/shadow/multi.h b/xen/arch/x86/mm/shadow/multi.h
index 835121e..7f829fd 100644
--- a/xen/arch/x86/mm/shadow/multi.h
+++ b/xen/arch/x86/mm/shadow/multi.h
@@ -21,42 +21,42 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
-extern int 
+extern int
 SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, GUEST_LEVELS)(
     struct vcpu *v, mfn_t gl1mfn, void *new_gl1p, u32 size);
-extern int 
+extern int
 SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, GUEST_LEVELS)(
     struct vcpu *v, mfn_t gl2mfn, void *new_gl2p, u32 size);
-extern int 
+extern int
 SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, GUEST_LEVELS)(
     struct vcpu *v, mfn_t gl2mfn, void *new_gl2p, u32 size);
-extern int 
+extern int
 SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, GUEST_LEVELS)(
     struct vcpu *v, mfn_t gl3mfn, void *new_gl3p, u32 size);
-extern int 
+extern int
 SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, GUEST_LEVELS)(
     struct vcpu *v, mfn_t gl4mfn, void *new_gl4p, u32 size);
 
-extern void 
+extern void
 SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, GUEST_LEVELS)(
     struct vcpu *v, mfn_t smfn);
-extern void 
+extern void
 SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, GUEST_LEVELS)(
     struct vcpu *v, mfn_t smfn);
-extern void 
+extern void
 SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, GUEST_LEVELS)(
     struct vcpu *v, mfn_t smfn);
-extern void 
+extern void
 SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, GUEST_LEVELS)(
     struct vcpu *v, mfn_t smfn);
 
-extern void 
+extern void
 SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl2mfn, int user_only);
-extern void 
+extern void
 SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl3mfn, int user_only);
-extern void 
+extern void
 SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl4mfn, int user_only);
 
@@ -82,19 +82,19 @@ SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl4mfn, mfn_t sl3mfn);
 
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
-int 
+int
 SHADOW_INTERNAL_NAME(sh_audit_l1_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl1mfn, mfn_t x);
-int 
+int
 SHADOW_INTERNAL_NAME(sh_audit_fl1_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl1mfn, mfn_t x);
-int 
+int
 SHADOW_INTERNAL_NAME(sh_audit_l2_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl2mfn, mfn_t x);
-int 
+int
 SHADOW_INTERNAL_NAME(sh_audit_l3_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl3mfn, mfn_t x);
-int 
+int
 SHADOW_INTERNAL_NAME(sh_audit_l4_table, GUEST_LEVELS)
     (struct vcpu *v, mfn_t sl4mfn, mfn_t x);
 #endif
@@ -117,7 +117,7 @@ extern const struct paging_mode
 SHADOW_INTERNAL_NAME(sh_paging_mode, GUEST_LEVELS);
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
-extern void 
+extern void
 SHADOW_INTERNAL_NAME(sh_resync_l1, GUEST_LEVELS)
      (struct vcpu *v, mfn_t gmfn, mfn_t snpmfn);
 
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 72518fd..4b69626 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -139,7 +139,7 @@ enum {
 #endif
 
 /******************************************************************************
- * Auditing routines 
+ * Auditing routines
  */
 
 #if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_FULL
@@ -191,27 +191,27 @@ extern void shadow_audit_tables(struct vcpu *v);
 #define SH_type_oos_snapshot  (16U) /* in use as OOS snapshot */
 #define SH_type_unused        (17U)
 
-/* 
+/*
  * What counts as a pinnable shadow?
  */
 
-static inline int sh_type_is_pinnable(struct vcpu *v, unsigned int t) 
+static inline int sh_type_is_pinnable(struct vcpu *v, unsigned int t)
 {
-    /* Top-level shadow types in each mode can be pinned, so that they 
+    /* Top-level shadow types in each mode can be pinned, so that they
      * persist even when not currently in use in a guest CR3 */
     if ( t == SH_type_l2_32_shadow
          || t == SH_type_l2_pae_shadow
-         || t == SH_type_l2h_pae_shadow 
+         || t == SH_type_l2h_pae_shadow
          || t == SH_type_l4_64_shadow )
         return 1;
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
     /* Early 64-bit linux used three levels of pagetables for the guest
      * and context switched by changing one l4 entry in a per-cpu l4
      * page.  When we're shadowing those kernels, we have to pin l3
      * shadows so they don't just evaporate on every context switch.
-     * For all other guests, we'd rather use the up-pointer field in l3s. */ 
-    if ( unlikely((v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL) 
+     * For all other guests, we'd rather use the up-pointer field in l3s. */
+    if ( unlikely((v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL)
                   && t == SH_type_l3_64_shadow) )
         return 1;
 #endif
@@ -220,7 +220,7 @@ static inline int sh_type_is_pinnable(struct vcpu *v, unsigned int t)
     return 0;
 }
 
-static inline int sh_type_has_up_pointer(struct vcpu *v, unsigned int t) 
+static inline int sh_type_has_up_pointer(struct vcpu *v, unsigned int t)
 {
     /* Multi-page shadows don't have up-pointers */
     if ( t == SH_type_l1_32_shadow
@@ -271,9 +271,9 @@ static inline void sh_terminate_list(struct page_list_head *tmp_list)
 
 #define SHF_L1_ANY  (SHF_L1_32|SHF_L1_PAE|SHF_L1_64)
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
 /* Marks a guest L1 page table which is shadowed but not write-protected.
- * If set, then *only* L1 shadows (SHF_L1_*) are allowed. 
+ * If set, then *only* L1 shadows (SHF_L1_*) are allowed.
  *
  * out_of_sync indicates that the shadow tables may not reflect the
  * guest tables.  If it is clear, then the shadow tables *must* reflect
@@ -281,9 +281,9 @@ static inline void sh_terminate_list(struct page_list_head *tmp_list)
  *
  * oos_may_write indicates that a page may have writable mappings.
  *
- * Most of the time the flags are synonymous.  There is a short period of time 
- * during resync that oos_may_write is clear but out_of_sync is not.  If a 
- * codepath is called during that time and is sensitive to oos issues, it may 
+ * Most of the time the flags are synonymous.  There is a short period of time
+ * during resync that oos_may_write is clear but out_of_sync is not.  If a
+ * codepath is called during that time and is sensitive to oos issues, it may
  * need to use the second flag.
  */
 #define SHF_out_of_sync (1u<<30)
@@ -303,27 +303,27 @@ static inline int sh_page_has_multiple_shadows(struct page_info *pg)
     return ( (shadows & ~(1UL << find_first_set_bit(shadows))) != 0 );
 }
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
 /* The caller must verify this is reasonable to call; i.e., valid mfn,
  * domain is translated, &c */
-static inline int page_is_out_of_sync(struct page_info *p) 
+static inline int page_is_out_of_sync(struct page_info *p)
 {
     return (p->count_info & PGC_page_table)
         && (p->shadow_flags & SHF_out_of_sync);
 }
 
-static inline int mfn_is_out_of_sync(mfn_t gmfn) 
+static inline int mfn_is_out_of_sync(mfn_t gmfn)
 {
     return page_is_out_of_sync(mfn_to_page(mfn_x(gmfn)));
 }
 
-static inline int page_oos_may_write(struct page_info *p) 
+static inline int page_oos_may_write(struct page_info *p)
 {
     return (p->count_info & PGC_page_table)
         && (p->shadow_flags & SHF_oos_may_write);
 }
 
-static inline int mfn_oos_may_write(mfn_t gmfn) 
+static inline int mfn_oos_may_write(mfn_t gmfn)
 {
     return page_oos_may_write(mfn_to_page(mfn_x(gmfn)));
 }
@@ -339,14 +339,14 @@ shadow_size(unsigned int shadow_type)
 }
 
 /******************************************************************************
- * Various function declarations 
+ * Various function declarations
  */
 
 /* Hash table functions */
 mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, unsigned int t);
-void  shadow_hash_insert(struct vcpu *v, 
+void  shadow_hash_insert(struct vcpu *v,
                          unsigned long n, unsigned int t, mfn_t smfn);
-void  shadow_hash_delete(struct vcpu *v, 
+void  shadow_hash_delete(struct vcpu *v,
                          unsigned long n, unsigned int t, mfn_t smfn);
 
 /* shadow promotion */
@@ -355,7 +355,7 @@ void shadow_demote(struct vcpu *v, mfn_t gmfn, u32 type);
 
 /* Shadow page allocation functions */
 void  shadow_prealloc(struct domain *d, u32 shadow_type, unsigned int count);
-mfn_t shadow_alloc(struct domain *d, 
+mfn_t shadow_alloc(struct domain *d,
                     u32 shadow_type,
                     unsigned long backpointer);
 void  shadow_free(struct domain *d, mfn_t smfn);
@@ -367,11 +367,11 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn);
 int sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size);
 
 /* Update the shadows in response to a pagetable write from a HVM guest */
-void sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn, 
+void sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
                                 void *entry, u32 size);
 
 /* Remove all writeable mappings of a guest frame from the shadows.
- * Returns non-zero if we need to flush TLBs. 
+ * Returns non-zero if we need to flush TLBs.
  * level and fault_addr desribe how we found this to be a pagetable;
  * level==0 means we have some other reason for revoking write access. */
 extern int sh_remove_write_access(struct vcpu *v, mfn_t readonly_mfn,
@@ -437,7 +437,7 @@ mfn_t oos_snapshot_lookup(struct vcpu *v, mfn_t gmfn);
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */
 
 
-/* Reset the up-pointers of every L3 shadow to 0. 
+/* Reset the up-pointers of every L3 shadow to 0.
  * This is called when l3 shadows stop being pinnable, to clear out all
  * the list-head bits so the up-pointer field is properly inititalised. */
 void sh_reset_l3_up_pointers(struct vcpu *v);
@@ -455,7 +455,7 @@ void sh_reset_l3_up_pointers(struct vcpu *v);
 
 
 /******************************************************************************
- * MFN/page-info handling 
+ * MFN/page-info handling
  */
 
 /* Override macros from asm/page.h to make them work with mfn_t */
@@ -496,9 +496,9 @@ sh_mfn_is_a_page_table(mfn_t gmfn)
         return 0;
 
     owner = page_get_owner(page);
-    if ( owner && shadow_mode_refcounts(owner) 
+    if ( owner && shadow_mode_refcounts(owner)
          && (page->count_info & PGC_page_table) )
-        return 1; 
+        return 1;
 
     type_info = page->u.inuse.type_info & PGT_type_mask;
     return type_info && (type_info <= PGT_l4_page_table);
@@ -511,8 +511,8 @@ sh_map_domain_page(mfn_t mfn)
     return map_domain_page(mfn_x(mfn));
 }
 
-static inline void 
-sh_unmap_domain_page(void *p) 
+static inline void
+sh_unmap_domain_page(void *p)
 {
     unmap_domain_page(p);
 }
@@ -523,8 +523,8 @@ sh_map_domain_page_global(mfn_t mfn)
     return map_domain_page_global(mfn_x(mfn));
 }
 
-static inline void 
-sh_unmap_domain_page_global(void *p) 
+static inline void
+sh_unmap_domain_page_global(void *p)
 {
     unmap_domain_page_global(p);
 }
@@ -534,9 +534,9 @@ sh_unmap_domain_page_global(void *p)
 
 void sh_destroy_shadow(struct vcpu *v, mfn_t smfn);
 
-/* Increase the refcount of a shadow page.  Arguments are the mfn to refcount, 
+/* Increase the refcount of a shadow page.  Arguments are the mfn to refcount,
  * and the physical address of the shadow entry that holds the ref (or zero
- * if the ref is held by something else).  
+ * if the ref is held by something else).
  * Returns 0 for failure, 1 for success. */
 static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
 {
@@ -555,16 +555,16 @@ static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
                        __backpointer(sp), mfn_x(smfn));
         return 0;
     }
-    
+
     /* Guarded by the paging lock, so no need for atomic update */
     sp->u.sh.count = nx;
 
     /* We remember the first shadow entry that points to each shadow. */
-    if ( entry_pa != 0 
+    if ( entry_pa != 0
          && sh_type_has_up_pointer(v, sp->u.sh.type)
-         && sp->up == 0 ) 
+         && sp->up == 0 )
         sp->up = entry_pa;
-    
+
     return 1;
 }
 
@@ -581,15 +581,15 @@ static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
     ASSERT(!(sp->count_info & PGC_count_mask));
 
     /* If this is the entry in the up-pointer, remove it */
-    if ( entry_pa != 0 
+    if ( entry_pa != 0
          && sh_type_has_up_pointer(v, sp->u.sh.type)
-         && sp->up == entry_pa ) 
+         && sp->up == entry_pa )
         sp->up = 0;
 
     x = sp->u.sh.count;
     nx = x - 1;
 
-    if ( unlikely(x == 0) ) 
+    if ( unlikely(x == 0) )
     {
         SHADOW_ERROR("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
                      mfn_x(smfn), sp->u.sh.count, sp->u.sh.type);
@@ -599,12 +599,12 @@ static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
     /* Guarded by the paging lock, so no need for atomic update */
     sp->u.sh.count = nx;
 
-    if ( unlikely(nx == 0) ) 
+    if ( unlikely(nx == 0) )
         sh_destroy_shadow(v, smfn);
 }
 
 
-/* Walk the list of pinned shadows, from the tail forwards, 
+/* Walk the list of pinned shadows, from the tail forwards,
  * skipping the non-head-page entries */
 static inline struct page_info *
 prev_pinned_shadow(const struct page_info *page,
@@ -747,7 +747,7 @@ struct sh_emulate_ctxt {
     mfn_t mfn1, mfn2;
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
-    /* Special case for avoiding having to verify writes: remember 
+    /* Special case for avoiding having to verify writes: remember
      * whether the old value had its low bit (_PAGE_PRESENT) clear. */
     int low_bit_was_clear:1;
 #endif
@@ -762,12 +762,12 @@ struct segment_register *hvm_get_seg_reg(
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
 /**************************************************************************/
-/* Virtual TLB entries 
+/* Virtual TLB entries
  *
- * We keep a cache of virtual-to-physical translations that we have seen 
- * since the last TLB flush.  This is safe to use for frame translations, 
+ * We keep a cache of virtual-to-physical translations that we have seen
+ * since the last TLB flush.  This is safe to use for frame translations,
  * but callers need to re-check the actual guest tables if the lookup fails.
- * 
+ *
  * Lookups and updates are protected by a per-vTLB (and hence per-vcpu)
  * lock.  This lock is held *only* while reading or writing the table,
  * so it is safe to take in any non-interrupt context.  Most lookups
@@ -785,7 +785,7 @@ struct shadow_vtlb {
 };
 
 /* Call whenever the guest flushes hit actual TLB */
-static inline void vtlb_flush(struct vcpu *v) 
+static inline void vtlb_flush(struct vcpu *v)
 {
     spin_lock(&v->arch.paging.vtlb_lock);
     memset(v->arch.paging.vtlb, 0, VTLB_ENTRIES * sizeof (struct shadow_vtlb));
@@ -801,7 +801,7 @@ static inline int vtlb_hash(unsigned long page_number)
 static inline void vtlb_insert(struct vcpu *v, unsigned long page,
                                unsigned long frame, uint32_t pfec)
 {
-    struct shadow_vtlb entry = 
+    struct shadow_vtlb entry =
         { .page_number = page, .frame_number = frame, .pfec = pfec };
     spin_lock(&v->arch.paging.vtlb_lock);
     v->arch.paging.vtlb[vtlb_hash(page)] = entry;
@@ -818,7 +818,7 @@ static inline unsigned long vtlb_lookup(struct vcpu *v,
 
     spin_lock(&v->arch.paging.vtlb_lock);
     if ( v->arch.paging.vtlb[i].pfec != 0
-         && v->arch.paging.vtlb[i].page_number == page_number 
+         && v->arch.paging.vtlb[i].page_number == page_number
          /* Any successful walk that had at least these pfec bits is OK */
          && (v->arch.paging.vtlb[i].pfec & pfec) == pfec )
     {
@@ -833,7 +833,7 @@ static inline int sh_check_page_has_no_refs(struct page_info *page)
 {
     unsigned long count = read_atomic(&page->count_info);
     return ( (count & PGC_count_mask) ==
-             ((count & PGC_allocated) ? 1 : 0) ); 
+             ((count & PGC_allocated) ? 1 : 0) );
 }
 
 #endif /* _XEN_SHADOW_PRIVATE_H */
diff --git a/xen/arch/x86/mm/shadow/types.h b/xen/arch/x86/mm/shadow/types.h
index 953d168..7391b3c 100644
--- a/xen/arch/x86/mm/shadow/types.h
+++ b/xen/arch/x86/mm/shadow/types.h
@@ -1,20 +1,20 @@
 /******************************************************************************
  * arch/x86/mm/shadow/types.h
- * 
+ *
  * Parts of this code are Copyright (c) 2006 by XenSource Inc.
  * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
  * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
@@ -31,7 +31,7 @@
 #define SHADOW_PAGING_LEVELS 3
 #endif
 
-/* 
+/*
  * Define various types for handling pagetabels, based on these options:
  * SHADOW_PAGING_LEVELS : Number of levels of shadow pagetables
  * GUEST_PAGING_LEVELS  : Number of levels of guest pagetables
@@ -101,14 +101,14 @@ static inline shadow_l1e_t
 shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
 { l1e_remove_flags(sl1e, flags); return sl1e; }
 
-static inline shadow_l1e_t shadow_l1e_empty(void) 
+static inline shadow_l1e_t shadow_l1e_empty(void)
 { return l1e_empty(); }
-static inline shadow_l2e_t shadow_l2e_empty(void) 
+static inline shadow_l2e_t shadow_l2e_empty(void)
 { return l2e_empty(); }
-static inline shadow_l3e_t shadow_l3e_empty(void) 
+static inline shadow_l3e_t shadow_l3e_empty(void)
 { return l3e_empty(); }
 #if SHADOW_PAGING_LEVELS >= 4
-static inline shadow_l4e_t shadow_l4e_empty(void) 
+static inline shadow_l4e_t shadow_l4e_empty(void)
 { return l4e_empty(); }
 #endif
 
@@ -141,10 +141,10 @@ static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
 #define shadow_l4_linear_offset(_a)                                           \
         (((_a) & VADDR_MASK) >> SHADOW_L4_PAGETABLE_SHIFT)
 
-/* Where to find each level of the linear mapping.  For PV guests, we use 
- * the shadow linear-map self-entry as many times as we need.  For HVM 
- * guests, the shadow doesn't have a linear-map self-entry so we must use 
- * the monitor-table's linear-map entry N-1 times and then the shadow-map 
+/* Where to find each level of the linear mapping.  For PV guests, we use
+ * the shadow linear-map self-entry as many times as we need.  For HVM
+ * guests, the shadow doesn't have a linear-map self-entry so we must use
+ * the monitor-table's linear-map entry N-1 times and then the shadow-map
  * entry once. */
 #define __sh_linear_l1_table ((shadow_l1e_t *)(SH_LINEAR_PT_VIRT_START))
 #define __sh_linear_l2_table ((shadow_l2e_t *)                               \
@@ -304,12 +304,12 @@ static inline int sh_l1e_is_magic(shadow_l1e_t sl1e)
 }
 
 /* Guest not present: a single magic value */
-static inline shadow_l1e_t sh_l1e_gnp(void) 
+static inline shadow_l1e_t sh_l1e_gnp(void)
 {
     return (shadow_l1e_t){ -1ULL };
 }
 
-static inline int sh_l1e_is_gnp(shadow_l1e_t sl1e) 
+static inline int sh_l1e_is_gnp(shadow_l1e_t sl1e)
 {
     return (sl1e.l1 == sh_l1e_gnp().l1);
 }
@@ -323,24 +323,24 @@ static inline int sh_l1e_is_gnp(shadow_l1e_t sl1e)
 #define SH_L1E_MMIO_GFN_MASK    0x00000000fffffff0ULL
 #define SH_L1E_MMIO_GFN_SHIFT   4
 
-static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags) 
+static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags)
 {
-    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC 
-                             | (gfn_x(gfn) << SH_L1E_MMIO_GFN_SHIFT) 
+    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC
+                             | (gfn_x(gfn) << SH_L1E_MMIO_GFN_SHIFT)
                              | (gflags & (_PAGE_USER|_PAGE_RW))) };
 }
 
-static inline int sh_l1e_is_mmio(shadow_l1e_t sl1e) 
+static inline int sh_l1e_is_mmio(shadow_l1e_t sl1e)
 {
     return ((sl1e.l1 & SH_L1E_MMIO_MAGIC_MASK) == SH_L1E_MMIO_MAGIC);
 }
 
-static inline gfn_t sh_l1e_mmio_get_gfn(shadow_l1e_t sl1e) 
+static inline gfn_t sh_l1e_mmio_get_gfn(shadow_l1e_t sl1e)
 {
     return _gfn((sl1e.l1 & SH_L1E_MMIO_GFN_MASK) >> SH_L1E_MMIO_GFN_SHIFT);
 }
 
-static inline u32 sh_l1e_mmio_get_flags(shadow_l1e_t sl1e) 
+static inline u32 sh_l1e_mmio_get_flags(shadow_l1e_t sl1e)
 {
     return (u32)((sl1e.l1 & (_PAGE_USER|_PAGE_RW)));
 }
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel