
Re: [Xen-devel] [PATCH 2/2] x86/PoD: clean up types



On Tue, Aug 14, 2012 at 9:46 AM, Jan Beulich <JBeulich@xxxxxxxx> wrote:
> GMFN values must undoubtedly be "unsigned long". "count" and
> "entry_count", being signed, should also be "long", as otherwise they
> can't represent every value that "d->tot_pages" (currently "uint32_t")
> can hold.
>
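To make the range argument concrete, here is a minimal standalone
illustration (plain host C, not Xen code; it assumes an LP64 build where
"long" is 64 bits wide, as on x86-64):

  #include <stdint.h>
  #include <stdio.h>
  #include <limits.h>

  int main(void)
  {
      uint32_t tot_pages = 3000000000u; /* valid for uint32_t, but > INT_MAX */
      long count = tot_pages;           /* representable once the field is long */

      printf("INT_MAX=%d tot_pages=%u count=%ld\n", INT_MAX, tot_pages, count);
      return 0;
  }

So a signed 32-bit "count"/"entry_count" could not mirror every value
d->tot_pages may take, while "long" can.
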
> Beyond that, the patch doesn't convert everything to "long", since in
> many places it is clear that "int" suffices. Where "long" is already in
> partial use, however, the conversion is carried through.
>
> Furthermore, page order values have no use of being "long".
>
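For reference on the order point: x86 page orders stop at 18 (4k is
order 0, 2M is order 9, 1G is order 18), so "unsigned int" is ample.
A tiny standalone sketch of the alignment check done in
p2m_pod_cache_add() (plain C with made-up values, not the Xen code
itself; note the mask is built with 1UL so the shift happens in
unsigned long):

  #include <stdio.h>

  int main(void)
  {
      unsigned long mfn = 0x12345;  /* hypothetical machine frame number */
      unsigned int order = 9;       /* 2M superpage on x86 */

      if ( mfn & ((1UL << order) - 1) )
          printf("mfn %lx not aligned order %u! (mask %lx)\n",
                 mfn, order, (1UL << order) - 1);
      else
          printf("mfn %lx is order-%u aligned\n", mfn, order);
      return 0;
  }
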
> Finally, in the course of updating a few printk messages anyway, some
> also get slightly shortened (to focus on the relevant information).
>
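The format string updates follow directly from the type changes: once
count/entry_count are "long", %d no longer matches and gcc's -Wformat
would warn, so the specifiers become %ld (and %lu where the value is
unsigned long). A tiny sketch, not Xen code:

  #include <stdio.h>

  int main(void)
  {
      long entry_count = 123456L;

      /* printf("entries=%d\n", entry_count);  -- int format, long argument */
      printf("entries=%ld\n", entry_count);    /* matching specifier */
      return 0;
  }
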
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Looks good, thanks.

Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>


>
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -66,7 +66,7 @@ static inline void unlock_page_alloc(str
>  static int
>  p2m_pod_cache_add(struct p2m_domain *p2m,
>                    struct page_info *page,
> -                  unsigned long order)
> +                  unsigned int order)
>  {
>      int i;
>      struct page_info *p;
> @@ -80,7 +80,7 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>      /* Check to make sure this is a contiguous region */
>      if( mfn_x(mfn) & ((1 << order) - 1) )
>      {
> -        printk("%s: mfn %lx not aligned order %lu! (mask %lx)\n",
> +        printk("%s: mfn %lx not aligned order %u! (mask %lx)\n",
>                 __func__, mfn_x(mfn), order, ((1UL << order) - 1));
>          return -1;
>      }
> @@ -146,7 +146,7 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>   * down 2-meg pages into singleton pages automatically.  Returns null if
>   * a superpage is requested and no superpages are available. */
>  static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m,
> -                                            unsigned long order)
> +                                            unsigned int order)
>  {
>      struct page_info *p = NULL;
>      int i;
> @@ -234,7 +234,7 @@ p2m_pod_set_cache_target(struct p2m_doma
>                  goto retry;
>              }
>
> -            printk("%s: Unable to allocate domheap page for pod cache.  target %lu cachesize %d\n",
> +            printk("%s: Unable to allocate page for PoD cache (target=%lu cache=%ld)\n",
>                     __func__, pod_target, p2m->pod.count);
>              ret = -ENOMEM;
>              goto out;
> @@ -337,10 +337,9 @@ out:
>  int
>  p2m_pod_set_mem_target(struct domain *d, unsigned long target)
>  {
> -    unsigned pod_target;
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
>      int ret = 0;
> -    unsigned long populated;
> +    unsigned long populated, pod_target;
>
>      pod_lock(p2m);
>
> @@ -633,7 +632,8 @@ out_unlock:
>  void p2m_pod_dump_data(struct domain *d)
>  {
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -    printk("    PoD entries=%d cachesize=%d\n",
> +
> +    printk("    PoD entries=%ld cachesize=%ld\n",
>             p2m->pod.entry_count, p2m->pod.count);
>  }
>
> @@ -1071,8 +1071,9 @@ p2m_pod_demand_populate(struct p2m_domai
>  out_of_memory:
>      pod_unlock(p2m);
>
> -    printk("%s: Out of populate-on-demand memory! tot_pages %" PRIu32 " pod_entries %" PRIi32 "\n",
> -           __func__, d->tot_pages, p2m->pod.entry_count);
> +    printk("%s: Dom%d out of PoD memory! (tot=%"PRIu32" ents=%ld dom%d)\n",
> +           __func__, d->domain_id, d->tot_pages, p2m->pod.entry_count,
> +           current->domain->domain_id);
>      domain_crash(d);
>      return -1;
>  out_fail:
> @@ -1111,10 +1112,9 @@ guest_physmap_mark_populate_on_demand(st
>                                        unsigned int order)
>  {
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -    unsigned long i;
> +    unsigned long i, pod_count = 0;
>      p2m_type_t ot;
>      mfn_t omfn;
> -    int pod_count = 0;
>      int rc = 0;
>
>      BUG_ON(!paging_mode_translate(d));
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -965,8 +965,7 @@ static void p2m_change_type_global(struc
>  #if P2M_AUDIT
>  long p2m_pt_audit_p2m(struct p2m_domain *p2m)
>  {
> -    int entry_count = 0;
> -    unsigned long pmbad = 0;
> +    unsigned long entry_count = 0, pmbad = 0;
>      unsigned long mfn, gfn, m2pfn;
>      int test_linear;
>      struct domain *d = p2m->domain;
> @@ -1126,7 +1125,7 @@ long p2m_pt_audit_p2m(struct p2m_domain
>
>      if ( entry_count != p2m->pod.entry_count )
>      {
> -        printk("%s: refcounted entry count %d, audit count %d!\n",
> +        printk("%s: refcounted entry count %ld, audit count %lu!\n",
>                 __func__,
>                 p2m->pod.entry_count,
>                 entry_count);
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -282,10 +282,10 @@ struct p2m_domain {
>      struct {
>          struct page_list_head super,   /* List of superpages                */
>                           single;       /* Non-super lists                   */
> -        int              count,        /* # of pages in cache lists         */
> +        long             count,        /* # of pages in cache lists         */
>                           entry_count;  /* # of pages in p2m marked pod      */
> -        unsigned         reclaim_single; /* Last gpfn of a scan */
> -        unsigned         max_guest;    /* gpfn of max guest demand-populate */
> +        unsigned long    reclaim_single; /* Last gpfn of a scan */
> +        unsigned long    max_guest;    /* gpfn of max guest demand-populate */
>  #define POD_HISTORY_MAX 128
>          /* gpfn of last guest superpage demand-populated */
>          unsigned long    last_populated[POD_HISTORY_MAX];
>
>
>

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

