[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] mem_event: move mem_event_domain out of struct domain


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Olaf Hering <olaf@xxxxxxxxx>
  • Date: Thu, 24 Nov 2011 21:17:21 +0100
  • Delivery-date: Thu, 24 Nov 2011 20:18:22 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1322165728 -3600
# Node ID dfa1e22034ca627961c311b1b6bb231276b7e6e6
# Parent  1027e7d13d02143048c7d48d7960967c5b1657a8
mem_event: move mem_event_domain out of struct domain

An upcoming change may increase the size of mem_event_domain. The result
is a build failure because struct domain gets larger than a page.
Allocate room for the three mem_event_domain members at runtime.

v2:
 - remove mem_ prefix from members of new struct

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

diff -r 1027e7d13d02 -r dfa1e22034ca xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4106,7 +4106,7 @@ static int hvm_memory_event_traps(long p
     if ( (p & HVMPME_onchangeonly) && (value == old) )
         return 1;
     
-    rc = mem_event_check_ring(d, &d->mem_access);
+    rc = mem_event_check_ring(d, &d->mem_event->access);
     if ( rc )
         return rc;
     
@@ -4129,7 +4129,7 @@ static int hvm_memory_event_traps(long p
         req.gla_valid = 1;
     }
     
-    mem_event_put_request(d, &d->mem_access, &req);
+    mem_event_put_request(d, &d->mem_event->access, &req);
     
     return 1;
 }
diff -r 1027e7d13d02 -r dfa1e22034ca xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -265,7 +265,7 @@ int mem_event_domctl(struct domain *d, x
     {
     case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
     {
-        struct mem_event_domain *med = &d->mem_paging;
+        struct mem_event_domain *med = &d->mem_event->paging;
         rc = -EINVAL;
 
         switch( mec->op )
@@ -310,7 +310,7 @@ int mem_event_domctl(struct domain *d, x
 
     case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: 
     {
-        struct mem_event_domain *med = &d->mem_access;
+        struct mem_event_domain *med = &d->mem_event->access;
         rc = -EINVAL;
 
         switch( mec->op )
@@ -333,7 +333,7 @@ int mem_event_domctl(struct domain *d, x
         case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE:
         {
             if ( med->ring_page )
-                rc = mem_event_disable(&d->mem_access);
+                rc = mem_event_disable(med);
         }
         break;
 
diff -r 1027e7d13d02 -r dfa1e22034ca xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -281,12 +281,12 @@ static struct page_info* mem_sharing_all
     vcpu_pause_nosync(v);
     req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
 
-    if(mem_event_check_ring(d, &d->mem_share)) return page;
+    if(mem_event_check_ring(d, &d->mem_event->share)) return page;
 
     req.gfn = gfn;
     req.p2mt = p2m_ram_shared;
     req.vcpu_id = v->vcpu_id;
-    mem_event_put_request(d, &d->mem_share, &req);
+    mem_event_put_request(d, &d->mem_event->share, &req);
 
     return page;
 }
@@ -301,7 +301,7 @@ int mem_sharing_sharing_resume(struct do
     mem_event_response_t rsp;
 
     /* Get request off the ring */
-    mem_event_get_response(&d->mem_share, &rsp);
+    mem_event_get_response(&d->mem_event->share, &rsp);
 
     /* Unpause domain/vcpu */
     if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff -r 1027e7d13d02 -r dfa1e22034ca xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -886,7 +886,7 @@ void p2m_mem_paging_drop_page(struct dom
     mem_event_request_t req;
 
     /* Check that there's space on the ring for this request */
-    if ( mem_event_check_ring(d, &d->mem_paging) == 0)
+    if ( mem_event_check_ring(d, &d->mem_event->paging) == 0)
     {
         /* Send release notification to pager */
         memset(&req, 0, sizeof(req));
@@ -894,7 +894,7 @@ void p2m_mem_paging_drop_page(struct dom
         req.gfn = gfn;
         req.vcpu_id = v->vcpu_id;
 
-        mem_event_put_request(d, &d->mem_paging, &req);
+        mem_event_put_request(d, &d->mem_event->paging, &req);
     }
 }
 
@@ -929,7 +929,7 @@ void p2m_mem_paging_populate(struct doma
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Check that there's space on the ring for this request */
-    if ( mem_event_check_ring(d, &d->mem_paging) )
+    if ( mem_event_check_ring(d, &d->mem_event->paging) )
         return;
 
     memset(&req, 0, sizeof(req));
@@ -960,7 +960,7 @@ void p2m_mem_paging_populate(struct doma
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
-        mem_event_put_req_producers(&d->mem_paging);
+        mem_event_put_req_producers(&d->mem_event->paging);
         return;
     }
 
@@ -969,7 +969,7 @@ void p2m_mem_paging_populate(struct doma
     req.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
 
-    mem_event_put_request(d, &d->mem_paging, &req);
+    mem_event_put_request(d, &d->mem_event->paging, &req);
 }
 
 /**
@@ -1049,7 +1049,7 @@ void p2m_mem_paging_resume(struct domain
     mfn_t mfn;
 
     /* Pull the response off the ring */
-    mem_event_get_response(&d->mem_paging, &rsp);
+    mem_event_get_response(&d->mem_event->paging, &rsp);
 
     /* Fix p2m entry if the page was not dropped */
     if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
@@ -1104,7 +1104,7 @@ void p2m_mem_access_check(unsigned long 
     p2m_unlock(p2m);
 
     /* Otherwise, check if there is a memory event listener, and send the message along */
-    res = mem_event_check_ring(d, &d->mem_access);
+    res = mem_event_check_ring(d, &d->mem_event->access);
     if ( res < 0 ) 
     {
         /* No listener */
@@ -1148,7 +1148,7 @@ void p2m_mem_access_check(unsigned long 
     
     req.vcpu_id = v->vcpu_id;
 
-    mem_event_put_request(d, &d->mem_access, &req);
+    mem_event_put_request(d, &d->mem_event->access, &req);
 
     /* VCPU paused, mem event request sent */
 }
@@ -1158,7 +1158,7 @@ void p2m_mem_access_resume(struct p2m_do
     struct domain *d = p2m->domain;
     mem_event_response_t rsp;
 
-    mem_event_get_response(&d->mem_access, &rsp);
+    mem_event_get_response(&d->mem_event->access, &rsp);
 
     /* Unpause domain */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff -r 1027e7d13d02 -r dfa1e22034ca xen/common/domain.c
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -304,6 +304,10 @@ struct domain *domain_create(
         init_status |= INIT_gnttab;
 
         poolid = 0;
+
+        d->mem_event = xzalloc(struct mem_event_per_domain);
+        if ( !d->mem_event )
+            goto fail;
     }
 
     if ( arch_domain_create(d, domcr_flags) != 0 )
@@ -335,6 +339,7 @@ struct domain *domain_create(
  fail:
     d->is_dying = DOMDYING_dead;
     atomic_set(&d->refcnt, DOMAIN_DESTROYED);
+    xfree(d->mem_event);
     if ( init_status & INIT_arch )
         arch_domain_destroy(d);
     if ( init_status & INIT_gnttab )
diff -r 1027e7d13d02 -r dfa1e22034ca xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -194,6 +194,16 @@ struct mem_event_domain
     int xen_port;
 };
 
+struct mem_event_per_domain
+{
+    /* Memory sharing support */
+    struct mem_event_domain share;
+    /* Memory paging support */
+    struct mem_event_domain paging;
+    /* Memory access support */
+    struct mem_event_domain access;
+};
+
 struct domain
 {
     domid_t          domain_id;
@@ -318,12 +328,8 @@ struct domain
     /* Non-migratable and non-restoreable? */
     bool_t disable_migrate;
 
-    /* Memory sharing support */
-    struct mem_event_domain mem_share;
-    /* Memory paging support */
-    struct mem_event_domain mem_paging;
-    /* Memory access support */
-    struct mem_event_domain mem_access;
+    /* Various mem_events */
+    struct mem_event_per_domain *mem_event;
 
     /* Currently computed from union of all vcpu cpu-affinity masks. */
     nodemask_t node_affinity;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.