[Xen-changelog] [xen stable-4.6] x86/shadow: account for ioreq server pages before complaining about not found mapping
commit d686f012a29727acd923408839c408d19ac3af17
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon May 9 12:59:30 2016 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon May 9 12:59:30 2016 +0200
x86/shadow: account for ioreq server pages before complaining about not found mapping
prepare_ring_for_helper(), just like share_xen_page_with_guest(),
takes a write reference on the page, and hence should similarly be
accounted for when determining whether to log a complaint.
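(Illustration, not part of the patch: a minimal sketch of the accounting rule the shadow hunk below implements, using the is_ioreq_server_page() helper this patch adds; the wrapper name refs_are_accounted_for is made up for the example.)
    /*
     * A page may legitimately keep one typed reference when either
     * share_xen_page_with_guest() (Xen heap pages) or
     * prepare_ring_for_helper() (ioreq server pages) has taken one,
     * plus up to three untyped references in external (HVM) mode.
     * Only complain when the observed counts exceed that allowance.
     */
    static bool_t refs_are_accounted_for(struct domain *d,
                                         const struct page_info *page)
    {
        unsigned int allowed_typed = is_xen_heap_page(page) ||
                                     is_ioreq_server_page(d, page);

        return shadow_mode_external(d) &&
               (page->count_info & PGC_count_mask) <= 3 &&
               (page->u.inuse.type_info & PGT_count_mask) == allowed_typed;
    }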
This requires using recursive locking for the ioreq server lock, as the
offending invocation of sh_remove_all_mappings() is down the call stack
from hvm_set_ioreq_server_state(). (While not strictly needed to be
done in all other instances too, convert all of them for consistency.)
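(Illustration, not part of the patch: why the recursive lock variant is needed. The nesting shown is schematic; the point is that the same CPU re-acquires a lock it already holds.)
    /*
     * hvm_set_ioreq_server_state() takes the ioreq server lock, and
     * further down the call stack sh_remove_all_mappings() ends up in
     * is_ioreq_server_page(), which wants the same lock on the same CPU.
     * A plain spin_lock() would deadlock on itself; spin_lock_recursive()
     * lets the owning CPU nest acquisitions, paired with one
     * spin_unlock_recursive() per level.
     */
    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
    /* ... nested path eventually calls is_ioreq_server_page(d, page) ... */
    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);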
At once improve the usefulness of the shadow error message: Log all
values involved in triggering it as well as the GFN (to aid
understanding which guest page it is that there is a problem with - in
cases like the one here the GFN is invariant across invocations, while
the MFN obviously can [and will] vary).
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
master commit: 77eb5dbeff78bbe549793325520f59ab46a187f8
master date: 2016-05-02 09:20:17 +0200
---
xen/arch/x86/hvm/hvm.c | 68 ++++++++++++++++++++++++++++-------------
xen/arch/x86/mm/shadow/common.c | 29 +++++++++++-------
xen/include/asm-x86/hvm/hvm.h | 1 +
3 files changed, 65 insertions(+), 33 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 09290fa..a24f30f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -637,6 +637,30 @@ static int hvm_map_ioreq_page(
return 0;
}
+bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page)
+{
+ const struct hvm_ioreq_server *s;
+ bool_t found = 0;
+
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+ list_for_each_entry ( s,
+ &d->arch.hvm_domain.ioreq_server.list,
+ list_entry )
+ {
+ if ( (s->ioreq.va && s->ioreq.page == page) ||
+ (s->bufioreq.va && s->bufioreq.page == page) )
+ {
+ found = 1;
+ break;
+ }
+ }
+
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+ return found;
+}
+
static void hvm_remove_ioreq_gmfn(
struct domain *d, struct hvm_ioreq_page *iorp)
{
@@ -1118,7 +1142,7 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
goto fail1;
domain_pause(d);
- spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
rc = -EEXIST;
if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
@@ -1141,14 +1165,14 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
if ( id )
*id = s->id;
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
domain_unpause(d);
return 0;
fail3:
fail2:
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
domain_unpause(d);
xfree(s);
@@ -1161,7 +1185,7 @@ static int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
struct hvm_ioreq_server *s;
int rc;
- spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
rc = -ENOENT;
list_for_each_entry ( s,
@@ -1190,7 +1214,7 @@ static int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
break;
}
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
}
@@ -1203,7 +1227,7 @@ static int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
struct hvm_ioreq_server *s;
int rc;
- spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
rc = -ENOENT;
list_for_each_entry ( s,
@@ -1228,7 +1252,7 @@ static int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
break;
}
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
}
@@ -1239,7 +1263,7 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
struct hvm_ioreq_server *s;
int rc;
- spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
rc = -ENOENT;
list_for_each_entry ( s,
@@ -1279,7 +1303,7 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
}
}
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
}
@@ -1290,7 +1314,7 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
struct hvm_ioreq_server *s;
int rc;
- spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
rc = -ENOENT;
list_for_each_entry ( s,
@@ -1330,7 +1354,7 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
}
}
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
}
@@ -1341,7 +1365,7 @@ static int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
struct list_head *entry;
int rc;
- spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
rc = -ENOENT;
list_for_each ( entry,
@@ -1370,7 +1394,7 @@ static int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
break;
}
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
}
@@ -1379,7 +1403,7 @@ static int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
struct hvm_ioreq_server *s;
int rc;
- spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
list_for_each_entry ( s,
&d->arch.hvm_domain.ioreq_server.list,
@@ -1392,7 +1416,7 @@ static int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
goto fail;
}
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return 0;
@@ -1402,7 +1426,7 @@ static int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
list_entry )
hvm_ioreq_server_remove_vcpu(s, v);
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
}
@@ -1411,21 +1435,21 @@ static void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
{
struct hvm_ioreq_server *s;
- spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
list_for_each_entry ( s,
&d->arch.hvm_domain.ioreq_server.list,
list_entry )
hvm_ioreq_server_remove_vcpu(s, v);
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
}
static void hvm_destroy_all_ioreq_servers(struct domain *d)
{
struct hvm_ioreq_server *s, *next;
- spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
/* No need to domain_pause() as the domain is being torn down */
@@ -1448,7 +1472,7 @@ static void hvm_destroy_all_ioreq_servers(struct domain *d)
xfree(s);
}
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
}
static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
@@ -1472,7 +1496,7 @@ static int hvm_set_dm_domain(struct domain *d, domid_t domid)
struct hvm_ioreq_server *s;
int rc = 0;
- spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
/*
* Lack of ioreq server is not a failure. HVM_PARAM_DM_DOMAIN will
@@ -1521,7 +1545,7 @@ static int hvm_set_dm_domain(struct domain *d, domid_t domid)
domain_unpause(d);
done:
- spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
return rc;
}
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 604ee8c..0eeffb4 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2439,7 +2439,7 @@ int sh_remove_write_access_from_sl1p(struct domain *d, mfn_t gmfn,
/* Remove all mappings of a guest frame from the shadow tables.
* Returns non-zero if we need to flush TLBs. */
-static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn)
+static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn)
{
struct page_info *page = mfn_to_page(gmfn);
@@ -2491,19 +2491,24 @@ static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn)
/* If that didn't catch the mapping, something is very wrong */
if ( !sh_check_page_has_no_refs(page) )
{
- /* Don't complain if we're in HVM and there are some extra mappings:
+ /*
+ * Don't complain if we're in HVM and there are some extra mappings:
* The qemu helper process has an untyped mapping of this dom's RAM
* and the HVM restore program takes another.
- * Also allow one typed refcount for xenheap pages, to match
- * share_xen_page_with_guest(). */
+ * Also allow one typed refcount for
+ * - Xen heap pages, to match share_xen_page_with_guest(),
+ * - ioreq server pages, to match prepare_ring_for_helper().
+ */
if ( !(shadow_mode_external(d)
&& (page->count_info & PGC_count_mask) <= 3
&& ((page->u.inuse.type_info & PGT_count_mask)
- == !!is_xen_heap_page(page))) )
+ == (is_xen_heap_page(page) ||
+ is_ioreq_server_page(d, page)))) )
{
- SHADOW_ERROR("can't find all mappings of mfn %lx: "
- "c=%08lx t=%08lx\n", mfn_x(gmfn),
- page->count_info, page->u.inuse.type_info);
+ SHADOW_ERROR("can't find all mappings of mfn %lx (gfn %lx): "
+ "c=%lx t=%lx x=%d i=%d\n", mfn_x(gmfn), gfn_x(gfn),
+ page->count_info, page->u.inuse.type_info,
+ !!is_xen_heap_page(page), is_ioreq_server_page(d, page));
}
}
@@ -3371,7 +3376,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
{
sh_remove_all_shadows_and_parents(d, mfn);
- if ( sh_remove_all_mappings(d, mfn) )
+ if ( sh_remove_all_mappings(d, mfn, _gfn(gfn)) )
flush_tlb_mask(d->domain_dirty_cpumask);
}
}
@@ -3406,7 +3411,8 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
{
/* This GFN->MFN mapping has gone away */
sh_remove_all_shadows_and_parents(d, omfn);
- if ( sh_remove_all_mappings(d, omfn) )
+ if ( sh_remove_all_mappings(d, omfn,
+ _gfn(gfn + (i << PAGE_SHIFT))) )
cpumask_or(&flushmask, &flushmask,
d->domain_dirty_cpumask);
}
@@ -3622,7 +3628,8 @@ int shadow_track_dirty_vram(struct domain *d,
dirty = 1;
/* TODO: Heuristics for finding the single mapping of
* this gmfn */
- flush_tlb |= sh_remove_all_mappings(d, mfn);
+ flush_tlb |= sh_remove_all_mappings(d, mfn,
+ _gfn(begin_pfn + i));
}
else
{
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index a2c366d..eba2c11 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -367,6 +367,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
void hvm_migrate_timers(struct vcpu *v);
bool_t hvm_io_pending(struct vcpu *v);
+bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
void hvm_do_resume(struct vcpu *v);
void hvm_migrate_pirqs(struct vcpu *v);
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.6
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog