
[Xen-devel] [PATCH 2 of 2] mem_event: use C99 initializers for mem_event_request_t users


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Olaf Hering <olaf@xxxxxxxxx>
  • Date: Mon, 20 Feb 2012 22:18:02 +0100
  • Delivery-date: Mon, 20 Feb 2012 21:18:36 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1329772592 -3600
# Node ID fcb66fe4134321fa089132ee0e9b24e21600404c
# Parent  e1a866546aef8ec1395858c4d2c8f28ff0d0502f
mem_event: use C99 initializers for mem_event_request_t users

Use C99 initializers for mem_event_request_t users to ensure that req is
always fully cleared, even with local debug patches that shuffle code
around to create a single exit point.
The common case is to fill in and send req, so always clearing it adds
no significant overhead.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
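
For reference, C99 (ISO/IEC 9899:1999, 6.7.8p21) guarantees that members
not named in an initializer list are initialized as if the object had
static storage duration, i.e. to zero. A minimal standalone sketch with a
made-up struct (not part of this patch) showing why the initializer
replaces the memset/assignment pair:

    #include <assert.h>
    #include <string.h>

    /* Hypothetical stand-in for mem_event_request_t. */
    typedef struct {
        unsigned int reason;
        unsigned int flags;
        unsigned long gfn;
        int vcpu_id;
    } request_t;

    int main(void)
    {
        /* Old pattern: clear everything, then set one member. */
        request_t a;
        memset(&a, 0, sizeof(a));
        a.gfn = 42;

        /* New pattern: C99 designated initializer.  All members
         * not named here (reason, flags, vcpu_id) are zeroed. */
        request_t b = { .gfn = 42 };

        assert(b.reason == 0 && b.flags == 0 && b.vcpu_id == 0);
        assert(a.gfn == b.gfn);
        return 0;
    }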

diff -r e1a866546aef -r fcb66fe41343 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4302,7 +4302,7 @@ static int hvm_memory_event_traps(long p
 {
     struct vcpu* v = current;
     struct domain *d = v->domain;
-    mem_event_request_t req;
+    mem_event_request_t req = { .reason = reason };
     int rc;
 
     if ( !(p & HVMPME_MODE_MASK) ) 
@@ -4321,9 +4321,6 @@ static int hvm_memory_event_traps(long p
     else if ( rc < 0 )
         return rc;
 
-    memset(&req, 0, sizeof(req));
-    req.reason = reason;
-
     if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync ) 
     {
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;    
diff -r e1a866546aef -r fcb66fe41343 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -913,7 +913,7 @@ int p2m_mem_paging_evict(struct domain *
 void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
                                 p2m_type_t p2mt)
 {
-    mem_event_request_t req;
+    mem_event_request_t req = { .gfn = gfn };
 
     /* We allow no ring in this unique case, because it won't affect
      * correctness of the guest execution at this point.  If this is the only
@@ -924,8 +924,6 @@ void p2m_mem_paging_drop_page(struct dom
         return;
 
     /* Send release notification to pager */
-    memset(&req, 0, sizeof(req));
-    req.gfn = gfn;
     req.flags = MEM_EVENT_FLAG_DROP_PAGE;
 
     /* Update stats unless the page hasn't yet been evicted */
@@ -962,7 +960,7 @@ void p2m_mem_paging_drop_page(struct dom
 void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
 {
     struct vcpu *v = current;
-    mem_event_request_t req;
+    mem_event_request_t req = { .gfn = gfn };
     p2m_type_t p2mt;
     p2m_access_t a;
     mfn_t mfn;
@@ -980,8 +978,6 @@ void p2m_mem_paging_populate(struct doma
     else if ( rc < 0 )
         return;
 
-    memset(&req, 0, sizeof(req));
-
     /* Fix p2m mapping */
     gfn_lock(p2m, gfn, 0);
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
@@ -1011,7 +1007,6 @@ void p2m_mem_paging_populate(struct doma
     }
 
     /* Send request to pager */
-    req.gfn = gfn;
     req.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
 

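To make the hazard from the commit message concrete: a contrived sketch
(hypothetical code, not from Xen) of a debug patch that funnels early
returns through a single exit point, where the old memset placement
could be bypassed:

    struct request { unsigned long gfn; unsigned int flags; };

    extern int prepare(void);                    /* hypothetical helper */
    extern void send_request(struct request *);  /* hypothetical helper */

    void notify(unsigned long gfn)
    {
        /* C99 initializer: req is cleared on every path, including
         * the early "goto out" below that skips the assignments.
         * A plain declaration plus a later memset would reach the
         * exit label with req uninitialized. */
        struct request req = { .gfn = gfn };

        if ( prepare() < 0 )
            goto out;        /* single exit point, as in the debug
                              * patches mentioned above */

        req.flags = 1;       /* e.g. MEM_EVENT_FLAG_DROP_PAGE */
     out:
        send_request(&req);
    }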