|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] mem_event: move mem_event_domain out of struct domain
# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1322665733 28800
# Node ID 2cbc53a24683ea7f3b22e5d4f137cd8f1cc615a3
# Parent 31f751ef3e009f20b1515416ebffa6e19221a56b
mem_event: move mem_event_domain out of struct domain
An upcoming change may increase the size of mem_event_domain. The result
is a build failure because struct domain gets larger than a page.
Allocate the room for the three mem_event_domain members at runtime.
v2:
- remove mem_ prefix from members of new struct
Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---
diff -r 31f751ef3e00 -r 2cbc53a24683 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Nov 30 07:06:24 2011 -0800
+++ b/xen/arch/x86/hvm/hvm.c Wed Nov 30 07:08:53 2011 -0800
@@ -4106,7 +4106,7 @@
if ( (p & HVMPME_onchangeonly) && (value == old) )
return 1;
- rc = mem_event_check_ring(d, &d->mem_access);
+ rc = mem_event_check_ring(d, &d->mem_event->access);
if ( rc )
return rc;
@@ -4129,7 +4129,7 @@
req.gla_valid = 1;
}
- mem_event_put_request(d, &d->mem_access, &req);
+ mem_event_put_request(d, &d->mem_event->access, &req);
return 1;
}
diff -r 31f751ef3e00 -r 2cbc53a24683 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c Wed Nov 30 07:06:24 2011 -0800
+++ b/xen/arch/x86/mm/mem_event.c Wed Nov 30 07:08:53 2011 -0800
@@ -265,7 +265,7 @@
{
case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
{
- struct mem_event_domain *med = &d->mem_paging;
+ struct mem_event_domain *med = &d->mem_event->paging;
rc = -EINVAL;
switch( mec->op )
@@ -310,7 +310,7 @@
case XEN_DOMCTL_MEM_EVENT_OP_ACCESS:
{
- struct mem_event_domain *med = &d->mem_access;
+ struct mem_event_domain *med = &d->mem_event->access;
rc = -EINVAL;
switch( mec->op )
@@ -333,7 +333,7 @@
case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE:
{
if ( med->ring_page )
- rc = mem_event_disable(&d->mem_access);
+ rc = mem_event_disable(med);
}
break;
diff -r 31f751ef3e00 -r 2cbc53a24683 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c Wed Nov 30 07:06:24 2011 -0800
+++ b/xen/arch/x86/mm/mem_sharing.c Wed Nov 30 07:08:53 2011 -0800
@@ -281,12 +281,12 @@
vcpu_pause_nosync(v);
req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
- if(mem_event_check_ring(d, &d->mem_share)) return page;
+ if(mem_event_check_ring(d, &d->mem_event->share)) return page;
req.gfn = gfn;
req.p2mt = p2m_ram_shared;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_share, &req);
+ mem_event_put_request(d, &d->mem_event->share, &req);
return page;
}
@@ -301,7 +301,7 @@
mem_event_response_t rsp;
/* Get request off the ring */
- mem_event_get_response(&d->mem_share, &rsp);
+ mem_event_get_response(&d->mem_event->share, &rsp);
/* Unpause domain/vcpu */
if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff -r 31f751ef3e00 -r 2cbc53a24683 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Wed Nov 30 07:06:24 2011 -0800
+++ b/xen/arch/x86/mm/p2m.c Wed Nov 30 07:08:53 2011 -0800
@@ -886,7 +886,7 @@
mem_event_request_t req;
/* Check that there's space on the ring for this request */
- if ( mem_event_check_ring(d, &d->mem_paging) == 0)
+ if ( mem_event_check_ring(d, &d->mem_event->paging) == 0)
{
/* Send release notification to pager */
memset(&req, 0, sizeof(req));
@@ -894,7 +894,7 @@
req.gfn = gfn;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_paging, &req);
+ mem_event_put_request(d, &d->mem_event->paging, &req);
}
}
@@ -929,7 +929,7 @@
struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* Check that there's space on the ring for this request */
- if ( mem_event_check_ring(d, &d->mem_paging) )
+ if ( mem_event_check_ring(d, &d->mem_event->paging) )
return;
memset(&req, 0, sizeof(req));
@@ -960,7 +960,7 @@
else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
{
/* gfn is already on its way back and vcpu is not paused */
- mem_event_put_req_producers(&d->mem_paging);
+ mem_event_put_req_producers(&d->mem_event->paging);
return;
}
@@ -969,7 +969,7 @@
req.p2mt = p2mt;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_paging, &req);
+ mem_event_put_request(d, &d->mem_event->paging, &req);
}
/**
@@ -1049,7 +1049,7 @@
mfn_t mfn;
/* Pull the response off the ring */
- mem_event_get_response(&d->mem_paging, &rsp);
+ mem_event_get_response(&d->mem_event->paging, &rsp);
/* Fix p2m entry if the page was not dropped */
if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
@@ -1104,7 +1104,7 @@
p2m_unlock(p2m);
/* Otherwise, check if there is a memory event listener, and send the
message along */
- res = mem_event_check_ring(d, &d->mem_access);
+ res = mem_event_check_ring(d, &d->mem_event->access);
if ( res < 0 )
{
/* No listener */
@@ -1148,7 +1148,7 @@
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_access, &req);
+ mem_event_put_request(d, &d->mem_event->access, &req);
/* VCPU paused, mem event request sent */
}
@@ -1158,7 +1158,7 @@
struct domain *d = p2m->domain;
mem_event_response_t rsp;
- mem_event_get_response(&d->mem_access, &rsp);
+ mem_event_get_response(&d->mem_event->access, &rsp);
/* Unpause domain */
if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff -r 31f751ef3e00 -r 2cbc53a24683 xen/common/domain.c
--- a/xen/common/domain.c Wed Nov 30 07:06:24 2011 -0800
+++ b/xen/common/domain.c Wed Nov 30 07:08:53 2011 -0800
@@ -304,6 +304,10 @@
init_status |= INIT_gnttab;
poolid = 0;
+
+ d->mem_event = xzalloc(struct mem_event_per_domain);
+ if ( !d->mem_event )
+ goto fail;
}
if ( arch_domain_create(d, domcr_flags) != 0 )
@@ -335,6 +339,7 @@
fail:
d->is_dying = DOMDYING_dead;
atomic_set(&d->refcnt, DOMAIN_DESTROYED);
+ xfree(d->mem_event);
if ( init_status & INIT_arch )
arch_domain_destroy(d);
if ( init_status & INIT_gnttab )
diff -r 31f751ef3e00 -r 2cbc53a24683 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Wed Nov 30 07:06:24 2011 -0800
+++ b/xen/include/xen/sched.h Wed Nov 30 07:08:53 2011 -0800
@@ -194,6 +194,16 @@
int xen_port;
};
+struct mem_event_per_domain
+{
+ /* Memory sharing support */
+ struct mem_event_domain share;
+ /* Memory paging support */
+ struct mem_event_domain paging;
+ /* Memory access support */
+ struct mem_event_domain access;
+};
+
struct domain
{
domid_t domain_id;
@@ -318,12 +328,8 @@
/* Non-migratable and non-restoreable? */
bool_t disable_migrate;
- /* Memory sharing support */
- struct mem_event_domain mem_share;
- /* Memory paging support */
- struct mem_event_domain mem_paging;
- /* Memory access support */
- struct mem_event_domain mem_access;
+ /* Various mem_events */
+ struct mem_event_per_domain *mem_event;
/* Currently computed from union of all vcpu cpu-affinity masks. */
nodemask_t node_affinity;
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.