
Re: [Xen-devel] [PATCH 1 of 3] mem_event: move mem_event_domain out of struct domain



>>> On 22.11.11 at 22:13, Olaf Hering <olaf@xxxxxxxxx> wrote:
> # HG changeset patch
> # User Olaf Hering <olaf@xxxxxxxxx>
> # Date 1321996135 -3600
> # Node ID d347a8a36d2e7951f98a3d22866dce004484d95f
> # Parent  d3859e348951cde6b211c5afb610ac1f12a909ec
> mem_event: move mem_event_domain out of struct domain
> 
> An upcoming change increases the size of mem_event_domain. The result is a
> build failure because struct domain gets larger than a page. Allocate the
> room for the three mem_event_domain members at runtime.

This looks like a good general cleanup to me. One comment below.
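
For readers wondering where the size limit comes from: on x86, struct
domain is allocated from a single xenheap page, and (going from memory
here, so the exact details in xen/arch/x86/domain.c may differ) that is
enforced at build time in the allocator, roughly:

    struct domain *alloc_domain_struct(void)
    {
        struct domain *d;

        /*
         * struct domain must fit in a single page; once it grows past
         * PAGE_SIZE, this check turns into the build failure the
         * changeset description refers to.
         */
        BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);

        d = alloc_xenheap_pages(0, MEMF_bits(32 + PAGE_SHIFT));
        if ( d != NULL )
            clear_page(d);
        return d;
    }

With the patch, the embedded cost in struct domain drops from three
struct mem_event_domain instances to a single pointer.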

> Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
> 
> diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -4098,7 +4098,7 @@ static int hvm_memory_event_traps(long p
>      if ( (p & HVMPME_onchangeonly) && (value == old) )
>          return 1;
>      
> -    rc = mem_event_check_ring(d, &d->mem_access);
> +    rc = mem_event_check_ring(d, &d->mem_event->mem_access);
>      if ( rc )
>          return rc;
>      
> @@ -4121,7 +4121,7 @@ static int hvm_memory_event_traps(long p
>          req.gla_valid = 1;
>      }
>      
> -    mem_event_put_request(d, &d->mem_access, &req);
> +    mem_event_put_request(d, &d->mem_event->mem_access, &req);
>      
>      return 1;
>  }
> diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/mm/mem_event.c
> --- a/xen/arch/x86/mm/mem_event.c
> +++ b/xen/arch/x86/mm/mem_event.c
> @@ -265,7 +265,7 @@ int mem_event_domctl(struct domain *d, x
>      {
>      case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
>      {
> -        struct mem_event_domain *med = &d->mem_paging;
> +        struct mem_event_domain *med = &d->mem_event->mem_paging;
>          rc = -EINVAL;
>  
>          switch( mec->op )
> @@ -310,7 +310,7 @@ int mem_event_domctl(struct domain *d, x
>  
>      case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: 
>      {
> -        struct mem_event_domain *med = &d->mem_access;
> +        struct mem_event_domain *med = &d->mem_event->mem_access;
>          rc = -EINVAL;
>  
>          switch( mec->op )
> @@ -333,7 +333,7 @@ int mem_event_domctl(struct domain *d, x
>          case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE:
>          {
>              if ( med->ring_page )
> -                rc = mem_event_disable(&d->mem_access);
> +                rc = mem_event_disable(med);
>          }
>          break;
>  
> diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/mm/mem_sharing.c
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -281,12 +281,12 @@ static struct page_info* mem_sharing_all
>      vcpu_pause_nosync(v);
>      req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
>  
> -    if(mem_event_check_ring(d, &d->mem_share)) return page;
> +    if(mem_event_check_ring(d, &d->mem_event->mem_share)) return page;
>  
>      req.gfn = gfn;
>      req.p2mt = p2m_ram_shared;
>      req.vcpu_id = v->vcpu_id;
> -    mem_event_put_request(d, &d->mem_share, &req);
> +    mem_event_put_request(d, &d->mem_event->mem_share, &req);
>  
>      return page;
>  }
> @@ -301,7 +301,7 @@ int mem_sharing_sharing_resume(struct do
>      mem_event_response_t rsp;
>  
>      /* Get request off the ring */
> -    mem_event_get_response(&d->mem_share, &rsp);
> +    mem_event_get_response(&d->mem_event->mem_share, &rsp);
>  
>      /* Unpause domain/vcpu */
>      if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
> diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/mm/p2m.c
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -885,7 +885,7 @@ void p2m_mem_paging_drop_page(struct dom
>      mem_event_request_t req;
>  
>      /* Check that there's space on the ring for this request */
> -    if ( mem_event_check_ring(d, &d->mem_paging) == 0)
> +    if ( mem_event_check_ring(d, &d->mem_event->mem_paging) == 0)
>      {
>          /* Send release notification to pager */
>          memset(&req, 0, sizeof(req));
> @@ -893,7 +893,7 @@ void p2m_mem_paging_drop_page(struct dom
>          req.gfn = gfn;
>          req.vcpu_id = v->vcpu_id;
>  
> -        mem_event_put_request(d, &d->mem_paging, &req);
> +        mem_event_put_request(d, &d->mem_event->mem_paging, &req);
>      }
>  }
>  
> @@ -928,7 +928,7 @@ void p2m_mem_paging_populate(struct doma
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
>  
>      /* Check that there's space on the ring for this request */
> -    if ( mem_event_check_ring(d, &d->mem_paging) )
> +    if ( mem_event_check_ring(d, &d->mem_event->mem_paging) )
>          return;
>  
>      memset(&req, 0, sizeof(req));
> @@ -959,7 +959,7 @@ void p2m_mem_paging_populate(struct doma
>      else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
>      {
>          /* gfn is already on its way back and vcpu is not paused */
> -        mem_event_put_req_producers(&d->mem_paging);
> +        mem_event_put_req_producers(&d->mem_event->mem_paging);
>          return;
>      }
>  
> @@ -968,7 +968,7 @@ void p2m_mem_paging_populate(struct doma
>      req.p2mt = p2mt;
>      req.vcpu_id = v->vcpu_id;
>  
> -    mem_event_put_request(d, &d->mem_paging, &req);
> +    mem_event_put_request(d, &d->mem_event->mem_paging, &req);
>  }
>  
>  /**
> @@ -1048,7 +1048,7 @@ void p2m_mem_paging_resume(struct domain
>      mfn_t mfn;
>  
>      /* Pull the response off the ring */
> -    mem_event_get_response(&d->mem_paging, &rsp);
> +    mem_event_get_response(&d->mem_event->mem_paging, &rsp);
>  
>      /* Fix p2m entry if the page was not dropped */
>      if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
> @@ -1101,7 +1101,7 @@ void p2m_mem_access_check(unsigned long 
>      p2m_unlock(p2m);
>  
>      /* Otherwise, check if there is a memory event listener, and send the message along */
> -    res = mem_event_check_ring(d, &d->mem_access);
> +    res = mem_event_check_ring(d, &d->mem_event->mem_access);
>      if ( res < 0 ) 
>      {
>          /* No listener */
> @@ -1145,7 +1145,7 @@ void p2m_mem_access_check(unsigned long 
>      
>      req.vcpu_id = v->vcpu_id;
>  
> -    mem_event_put_request(d, &d->mem_access, &req);
> +    mem_event_put_request(d, &d->mem_event->mem_access, &req);
>  
>      /* VCPU paused, mem event request sent */
>  }
> @@ -1155,7 +1155,7 @@ void p2m_mem_access_resume(struct p2m_do
>      struct domain *d = p2m->domain;
>      mem_event_response_t rsp;
>  
> -    mem_event_get_response(&d->mem_access, &rsp);
> +    mem_event_get_response(&d->mem_event->mem_access, &rsp);
>  
>      /* Unpause domain */
>      if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
> diff -r d3859e348951 -r d347a8a36d2e xen/common/domain.c
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -304,6 +304,10 @@ struct domain *domain_create(
>          init_status |= INIT_gnttab;
>  
>          poolid = 0;
> +
> +        d->mem_event = xzalloc(struct mem_event_per_domain);
> +        if ( !d->mem_event )
> +            goto fail;
>      }
>  
>      if ( arch_domain_create(d, domcr_flags) != 0 )
> @@ -335,6 +339,7 @@ struct domain *domain_create(
>   fail:
>      d->is_dying = DOMDYING_dead;
>      atomic_set(&d->refcnt, DOMAIN_DESTROYED);
> +    xfree(d->mem_event);
>      if ( init_status & INIT_arch )
>          arch_domain_destroy(d);
>      if ( init_status & INIT_gnttab )
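
Not an objection, merely a note for other readers: the unconditional
xfree() on the fail path is safe even when we reach it before the
allocation has happened, since struct domain comes back zero-filled
from alloc_domain_struct(), so d->mem_event stays NULL until the
xzalloc() above succeeds, and xfree() is a no-op on NULL. A minimal
sketch of the pattern being relied on (illustration only, not part of
the patch; the example_* names are made up):

    static int example_alloc(struct domain *d)
    {
        d->mem_event = xzalloc(struct mem_event_per_domain);
        if ( !d->mem_event )
            return -ENOMEM;      /* d->mem_event is still NULL */
        return 0;
    }

    static void example_free(struct domain *d)
    {
        xfree(d->mem_event);     /* no-op when never allocated */
        d->mem_event = NULL;
    }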
> diff -r d3859e348951 -r d347a8a36d2e xen/include/xen/sched.h
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -194,6 +194,16 @@ struct mem_event_domain
>      int xen_port;
>  };
>  
> +struct mem_event_per_domain
> +{
> +    /* Memory sharing support */
> +    struct mem_event_domain mem_share;
> +    /* Memory paging support */
> +    struct mem_event_domain mem_paging;
> +    /* Memory access support */
> +    struct mem_event_domain mem_access;

Could we drop the mem_ prefixes here? Reduces typing as well as line
wrapping pressure.
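
I.e. something like this (just to illustrate the suggestion, untested):

    struct mem_event_per_domain
    {
        /* Memory sharing support */
        struct mem_event_domain share;
        /* Memory paging support */
        struct mem_event_domain paging;
        /* Memory access support */
        struct mem_event_domain access;
    };

with the call sites then reading d->mem_event->paging and the like.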

Jan

> +};
> +
>  struct domain
>  {
>      domid_t          domain_id;
> @@ -318,12 +328,8 @@ struct domain
>      /* Non-migratable and non-restoreable? */
>      bool_t disable_migrate;
>  
> -    /* Memory sharing support */
> -    struct mem_event_domain mem_share;
> -    /* Memory paging support */
> -    struct mem_event_domain mem_paging;
> -    /* Memory access support */
> -    struct mem_event_domain mem_access;
> +    /* Various mem_events */
> +    struct mem_event_per_domain *mem_event;
>  
>      /* Currently computed from union of all vcpu cpu-affinity masks. */
>      nodemask_t node_affinity;