|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen master] x86/shadow: purge {write,cmpxchg}_guest_entry() hooks
commit 36053bdddec20b0209c7f485e33c162410454324
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Fri Mar 24 11:19:37 2023 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Mar 24 11:19:37 2023 +0100
x86/shadow: purge {write,cmpxchg}_guest_entry() hooks
These aren't mode dependent (see 06f04f54ba97 ["x86/shadow:
sh_{write,cmpxchg}_guest_entry() are PV-only"], where they were moved
out of multi.c) and hence there's no need to have pointers to the
functions in struct shadow_paging_mode. Due to include dependencies,
however, the "paging" wrappers need to move out of paging.h; they're
needed from PV memory management code only anyway, so by moving them
their exposure is reduced at the same time.
By carefully placing the (moved and renamed) shadow function
declarations, #ifdef can also be dropped from the "paging" wrappers
(paging_mode_shadow() is constant false when !SHADOW_PAGING).
While moving the code, drop the (largely wrong) comment from
paging_write_guest_entry() and reduce that of
paging_cmpxchg_guest_entry().
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/include/asm/paging.h | 45 ---------------------------------------
xen/arch/x86/include/asm/shadow.h | 6 ++++++
xen/arch/x86/mm/shadow/multi.c | 4 ----
xen/arch/x86/mm/shadow/private.h | 6 ------
xen/arch/x86/mm/shadow/pv.c | 12 +++++------
xen/arch/x86/pv/mm.h | 21 ++++++++++++++++++
6 files changed, 33 insertions(+), 61 deletions(-)
diff --git a/xen/arch/x86/include/asm/paging.h
b/xen/arch/x86/include/asm/paging.h
index 38bec8af2e..289a83156a 100644
--- a/xen/arch/x86/include/asm/paging.h
+++ b/xen/arch/x86/include/asm/paging.h
@@ -98,13 +98,6 @@
struct shadow_paging_mode {
#ifdef CONFIG_SHADOW_PAGING
-#ifdef CONFIG_PV
- void (*write_guest_entry )(struct vcpu *v, intpte_t *p,
- intpte_t new, mfn_t gmfn);
- intpte_t (*cmpxchg_guest_entry )(struct vcpu *v, intpte_t *p,
- intpte_t old, intpte_t new,
- mfn_t gmfn);
-#endif
#ifdef CONFIG_HVM
int (*guess_wrmap )(struct vcpu *v,
unsigned long vaddr, mfn_t gmfn);
@@ -326,44 +319,6 @@ static inline void paging_update_paging_modes(struct vcpu
*v)
v->domain->arch.paging.update_paging_modes(v);
}
-#ifdef CONFIG_PV
-
-/*
- * Write a new value into the guest pagetable, and update the
- * paging-assistance state appropriately. Returns false if we page-faulted,
- * true for success.
- */
-static inline void paging_write_guest_entry(
- struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
-{
-#ifdef CONFIG_SHADOW_PAGING
- if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
- paging_get_hostmode(v)->shadow.write_guest_entry(v, p, new, gmfn);
- else
-#endif
- write_atomic(p, new);
-}
-
-
-/*
- * Cmpxchg a new value into the guest pagetable, and update the
- * paging-assistance state appropriately. Returns false if we page-faulted,
- * true if not. N.B. caller should check the value of "old" to see if the
- * cmpxchg itself was successful.
- */
-static inline intpte_t paging_cmpxchg_guest_entry(
- struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn)
-{
-#ifdef CONFIG_SHADOW_PAGING
- if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
- return paging_get_hostmode(v)->shadow.cmpxchg_guest_entry(v, p, old,
- new, gmfn);
-#endif
- return cmpxchg(p, old, new);
-}
-
-#endif /* CONFIG_PV */
-
/* Helper function that writes a pte in such a way that a concurrent read
* never sees a half-written entry that has _PAGE_PRESENT set */
static inline void safe_write_pte(l1_pgentry_t *p, l1_pgentry_t new)
diff --git a/xen/arch/x86/include/asm/shadow.h
b/xen/arch/x86/include/asm/shadow.h
index 73122f0312..ba2b0e170b 100644
--- a/xen/arch/x86/include/asm/shadow.h
+++ b/xen/arch/x86/include/asm/shadow.h
@@ -248,6 +248,12 @@ static inline void pv_l1tf_domain_destroy(struct domain *d)
#endif
}
+/* Functions that atomically write PV guest PT entries */
+void shadow_write_guest_entry(
+ struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn);
+intpte_t shadow_cmpxchg_guest_entry(
+ struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn);
+
#endif /* CONFIG_PV */
/* Remove all shadows of the guest mfn. */
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index e4dd50e01c..8699de0501 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4155,10 +4155,6 @@ const struct paging_mode sh_paging_mode = {
#endif
.update_cr3 = sh_update_cr3,
.guest_levels = GUEST_PAGING_LEVELS,
-#ifdef CONFIG_PV
- .shadow.write_guest_entry = sh_write_guest_entry,
- .shadow.cmpxchg_guest_entry = sh_cmpxchg_guest_entry,
-#endif
#ifdef CONFIG_HVM
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
.shadow.guess_wrmap = sh_guess_wrmap,
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 1c3d15fa2e..59443c3171 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -428,12 +428,6 @@ static inline int sh_remove_write_access(struct domain *d,
mfn_t readonly_mfn,
}
#endif
-/* Functions that atomically write PV guest PT entries */
-void cf_check sh_write_guest_entry(
- struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn);
-intpte_t cf_check sh_cmpxchg_guest_entry(
- struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn);
-
/* Unhook the non-Xen mappings in this top-level shadow mfn.
* With user_only == 1, unhooks only the user-mode mappings. */
void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only);
diff --git a/xen/arch/x86/mm/shadow/pv.c b/xen/arch/x86/mm/shadow/pv.c
index ed10d5479c..0ef8e53d12 100644
--- a/xen/arch/x86/mm/shadow/pv.c
+++ b/xen/arch/x86/mm/shadow/pv.c
@@ -28,8 +28,8 @@
* Write a new value into the guest pagetable, and update the shadows
* appropriately.
*/
-void cf_check
-sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
+void
+shadow_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
{
paging_lock(v->domain);
write_atomic(p, new);
@@ -38,13 +38,13 @@ sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t
new, mfn_t gmfn)
}
/*
- * Cmpxchg a new value into the guest pagetable, and update the shadows
+ * Compare and exchange a guest pagetable entry, and update the shadows
* appropriately. Returns the previous entry found, which the caller is
* expected to check to see if the cmpxchg was successful.
*/
-intpte_t cf_check
-sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t old,
- intpte_t new, mfn_t gmfn)
+intpte_t
+shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t old,
+ intpte_t new, mfn_t gmfn)
{
intpte_t t;
diff --git a/xen/arch/x86/pv/mm.h b/xen/arch/x86/pv/mm.h
index 5a117c735d..4564cab9fc 100644
--- a/xen/arch/x86/pv/mm.h
+++ b/xen/arch/x86/pv/mm.h
@@ -1,6 +1,8 @@
#ifndef __PV_MM_H__
#define __PV_MM_H__
+#include <asm/shadow.h>
+
l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn);
int new_guest_cr3(mfn_t mfn);
@@ -31,6 +33,25 @@ static inline l1_pgentry_t guest_get_eff_kern_l1e(unsigned
long linear)
return l1e;
}
+static inline void paging_write_guest_entry(
+ struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
+{
+ if ( unlikely(paging_mode_shadow(v->domain)) )
+ shadow_write_guest_entry(v, p, new, gmfn);
+ else
+ write_atomic(p, new);
+}
+
+
+/* Compare and exchange a guest pagetable entry. Returns the old value. */
+static inline intpte_t paging_cmpxchg_guest_entry(
+ struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn)
+{
+ if ( unlikely(paging_mode_shadow(v->domain)) )
+ return shadow_cmpxchg_guest_entry(v, p, old, new, gmfn);
+ return cmpxchg(p, old, new);
+}
+
/*
* PTE updates can be done with ordinary writes except:
* 1. Debug builds get extra checking by using CMPXCHG[8B].
--
generated by git-patchbot for /home/xen/git/xen.git#master
|
Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.