[Xen-devel] [PATCH 15/20] x86/shadow: Alter shadow_unhook{_???}_mappings() to take a domain

These functions use their vcpu parameter only to look up the owning
domain (each per-mode routine open-codes "struct domain *d = v->domain;"),
so pass the domain directly.  No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
---
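Note, not part of the patch: the change is purely mechanical, dropping a
vcpu parameter that was only ever dereferenced to reach the domain.  A
minimal standalone sketch of the pattern, using stub types in place of
Xen's real struct vcpu / struct domain:

    #include <stdio.h>

    /* Stubs standing in for Xen's struct domain / struct vcpu. */
    struct domain { int domain_id; };
    struct vcpu { struct domain *domain; };

    /* Before: the callee takes a vcpu only to re-derive the domain. */
    static void unhook_old(struct vcpu *v, int user_only)
    {
        struct domain *d = v->domain;   /* v is not used for anything else */
        printf("unhook d%d user_only=%d\n", d->domain_id, user_only);
    }

    /* After: the caller passes the domain directly; the vcpu hop is gone. */
    static void unhook_new(struct domain *d, int user_only)
    {
        printf("unhook d%d user_only=%d\n", d->domain_id, user_only);
    }

    int main(void)
    {
        struct domain dom = { .domain_id = 1 };
        struct vcpu v = { .domain = &dom };

        unhook_old(&v, 0);          /* old interface */
        unhook_new(v.domain, 0);    /* new interface */
        return 0;
    }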
xen/arch/x86/mm/shadow/common.c | 12 ++++++------
xen/arch/x86/mm/shadow/multi.c | 13 +++++--------
xen/arch/x86/mm/shadow/multi.h | 6 +++---
xen/arch/x86/mm/shadow/private.h | 2 +-
4 files changed, 15 insertions(+), 18 deletions(-)
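Also not part of the patch: shadow_unhook_mappings() is a dispatcher keyed
on the top-level shadow's type, as the common.c hunk below shows.  A
standalone sketch of that dispatch shape, with stub constants and functions
in place of the real SH_type_* values and the SHADOW_INTERNAL_NAME()-
generated routines (the real code reads the type out of the shadow page's
page_info, and SH_type_l2h_pae_shadow also routes to the PAE handler):

    #include <stdio.h>

    /* Stub types; Xen's real struct domain and mfn_t are more involved. */
    struct domain { int domain_id; };
    typedef struct { unsigned long mfn; } mfn_t;

    /* Stand-ins for the real SH_type_* shadow type constants. */
    enum sh_type { SH_type_l2_32_shadow, SH_type_l2_pae_shadow,
                   SH_type_l4_64_shadow };

    /* Stubs for the per-mode routines SHADOW_INTERNAL_NAME() selects. */
    static void unhook_32b(struct domain *d, mfn_t smfn, int user_only)
    { printf("32b: d%d mfn %#lx user=%d\n", d->domain_id, smfn.mfn, user_only); }
    static void unhook_pae(struct domain *d, mfn_t smfn, int user_only)
    { printf("pae: d%d mfn %#lx user=%d\n", d->domain_id, smfn.mfn, user_only); }
    static void unhook_64b(struct domain *d, mfn_t smfn, int user_only)
    { printf("64b: d%d mfn %#lx user=%d\n", d->domain_id, smfn.mfn, user_only); }

    /* Dispatch on the top-level shadow's type, as the real function does. */
    static void unhook_mappings(struct domain *d, enum sh_type type,
                                mfn_t smfn, int user_only)
    {
        switch ( type )
        {
        case SH_type_l2_32_shadow:  unhook_32b(d, smfn, user_only); break;
        case SH_type_l2_pae_shadow: unhook_pae(d, smfn, user_only); break;
        case SH_type_l4_64_shadow:  unhook_64b(d, smfn, user_only); break;
        default: fprintf(stderr, "bad shadow type\n"); break;
        }
    }

    int main(void)
    {
        struct domain dom = { .domain_id = 2 };

        unhook_mappings(&dom, SH_type_l4_64_shadow, (mfn_t){ 0x1234 }, 1);
        return 0;
    }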
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 3810b75..4a9b94b 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1244,20 +1244,20 @@ static unsigned int shadow_min_acceptable_pages(struct domain *d)
/* Dispatcher function: call the per-mode function that will unhook the
* non-Xen mappings in this top-level shadow mfn. With user_only == 1,
* unhooks only the user-mode mappings. */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn, int user_only)
+void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only)
{
struct page_info *sp = mfn_to_page(smfn);
switch ( sp->u.sh.type )
{
case SH_type_l2_32_shadow:
- SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, 2)(v, smfn, user_only);
+ SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, 2)(d, smfn, user_only);
break;
case SH_type_l2_pae_shadow:
case SH_type_l2h_pae_shadow:
- SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, 3)(v, smfn, user_only);
+ SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, 3)(d, smfn, user_only);
break;
case SH_type_l4_64_shadow:
- SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, 4)(v, smfn, user_only);
+ SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, 4)(d, smfn, user_only);
break;
default:
SHADOW_ERROR("top-level shadow has bad type %08x\n", sp->u.sh.type);
@@ -1322,7 +1322,7 @@ static void _shadow_prealloc(
if ( !pagetable_is_null(v2->arch.shadow_table[i]) )
{
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PREALLOC_UNHOOK);
- shadow_unhook_mappings(v,
+ shadow_unhook_mappings(d,
pagetable_get_mfn(v2->arch.shadow_table[i]), 0);
/* See if that freed up enough space */
@@ -1377,7 +1377,7 @@ static void shadow_blow_tables(struct domain *d)
for_each_vcpu(d, v)
for ( i = 0 ; i < 4 ; i++ )
if ( !pagetable_is_null(v->arch.shadow_table[i]) )
- shadow_unhook_mappings(v,
+ shadow_unhook_mappings(d,
pagetable_get_mfn(v->arch.shadow_table[i]), 0);
/* Make sure everyone sees the unshadowings */
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index ab6ebe2..79d8888 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2074,9 +2074,8 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
#if GUEST_PAGING_LEVELS == 2
-void sh_unhook_32b_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
+void sh_unhook_32b_mappings(struct domain *d, mfn_t sl2mfn, int user_only)
{
- struct domain *d = v->domain;
shadow_l2e_t *sl2e;
SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
if ( !user_only || (sl2e->l2 & _PAGE_USER) )
@@ -2086,10 +2085,9 @@ void sh_unhook_32b_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
#elif GUEST_PAGING_LEVELS == 3
-void sh_unhook_pae_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
+void sh_unhook_pae_mappings(struct domain *d, mfn_t sl2mfn, int user_only)
/* Walk a PAE l2 shadow, unhooking entries from all the subshadows */
{
- struct domain *d = v->domain;
shadow_l2e_t *sl2e;
SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
if ( !user_only || (sl2e->l2 & _PAGE_USER) )
@@ -2099,9 +2097,8 @@ void sh_unhook_pae_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
#elif GUEST_PAGING_LEVELS == 4
-void sh_unhook_64b_mappings(struct vcpu *v, mfn_t sl4mfn, int user_only)
+void sh_unhook_64b_mappings(struct domain *d, mfn_t sl4mfn, int user_only)
{
- struct domain *d = v->domain;
shadow_l4e_t *sl4e;
SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, d, {
if ( !user_only || (sl4e->l4 & _PAGE_USER) )
@@ -4506,7 +4503,7 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
{
gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
- shadow_unhook_mappings(v, smfn, 1/* user pages only */);
+ shadow_unhook_mappings(d, smfn, 1/* user pages only */);
flush = 1;
}
}
@@ -4545,7 +4542,7 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
if ( mfn_valid(smfn) )
{
mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
- shadow_unhook_mappings(v, smfn, 1/* user pages only */);
+ shadow_unhook_mappings(d, smfn, 1/* user pages only */);
/* Now flush the TLB: we removed toplevel mappings. */
flush_tlb_mask(d->domain_dirty_cpumask);
}
diff --git a/xen/arch/x86/mm/shadow/multi.h b/xen/arch/x86/mm/shadow/multi.h
index 8bb8ece..07dffac 100644
--- a/xen/arch/x86/mm/shadow/multi.h
+++ b/xen/arch/x86/mm/shadow/multi.h
@@ -52,13 +52,13 @@ SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, GUEST_LEVELS)(
extern void
SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, GUEST_LEVELS)
- (struct vcpu *v, mfn_t sl2mfn, int user_only);
+ (struct domain *d, mfn_t sl2mfn, int user_only);
extern void
SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, GUEST_LEVELS)
- (struct vcpu *v, mfn_t sl3mfn, int user_only);
+ (struct domain *d, mfn_t sl3mfn, int user_only);
extern void
SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, GUEST_LEVELS)
- (struct vcpu *v, mfn_t sl4mfn, int user_only);
+ (struct domain *d, mfn_t sl4mfn, int user_only);
extern int
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, GUEST_LEVELS)
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index cddfde6..14f5d45 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -394,7 +394,7 @@ void shadow_update_paging_modes(struct vcpu *v);
/* Unhook the non-Xen mappings in this top-level shadow mfn.
* With user_only == 1, unhooks only the user-mode mappings. */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn, int user_only);
+void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Allow a shadowed page to go out of sync */
--
1.7.10.4