
Re: [Xen-devel] [PATCH v2 07/16] xen/x86: p2m-pod: Use typesafe gfn in p2m_pod_decrease_reservation



On 09/21/2017 01:40 PM, Julien Grall wrote:
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
> Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
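
For anyone new to the typesafe work: gfn_t is a single-member struct
wrapper around the raw frame number, so mixing up guest frame numbers
with machine frame numbers (or bare integers) becomes a compile-time
error rather than a silent bug. Below is a minimal standalone sketch of
the idea, modelled on the TYPE_SAFE pattern in
xen/include/xen/typesafe.h; the dump_gfn()/main() parts are
illustrative only, not from the tree:

  #include <stdio.h>

  /* Wrap a scalar in a single-member struct so distinct frame-number
   * types cannot be mixed implicitly; all conversions must go through
   * the explicit _gfn()/gfn_x() accessors. */
  #define TYPE_SAFE(_type, _name)                                        \
      typedef struct { _type _name; } _name##_t;                         \
      static inline _name##_t _##_name(_type n)                          \
          { return (_name##_t){ n }; }                                   \
      static inline _type _name##_x(_name##_t n) { return n._name; }

  TYPE_SAFE(unsigned long, gfn)   /* generates gfn_t, _gfn(), gfn_x() */

  /* Hypothetical consumer: only accepts a gfn_t, never a bare integer. */
  static void dump_gfn(gfn_t gfn)
  {
      printf("gfn %#lx\n", gfn_x(gfn));
  }

  int main(void)
  {
      unsigned long gmfn = 0x1234;

      dump_gfn(_gfn(gmfn));  /* convert explicitly at the boundary */
      /* dump_gfn(gmfn); */  /* would fail to compile: wrong type  */
      return 0;
  }

This is why the common/memory.c caller below gains the _gfn(gmfn)
wrapping, while the pod code unwraps with gfn_x() wherever the indexed
helpers still take a raw unsigned long.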

> 
> ---
> 
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> 
>     Changes in v2:
>         - Add Andrew's acked-by
> ---
>  xen/arch/arm/p2m.c           |  3 +--
>  xen/arch/x86/mm/p2m-pod.c    | 20 +++++++++-----------
>  xen/common/memory.c          |  3 ++-
>  xen/include/asm-arm/p2m.h    | 13 -------------
>  xen/include/asm-x86/p2m.h    |  7 -------
>  xen/include/xen/p2m-common.h | 13 +++++++++++++
>  6 files changed, 25 insertions(+), 34 deletions(-)
> 
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 192a1c329d..0410b1e86b 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -393,8 +393,7 @@ int guest_physmap_mark_populate_on_demand(struct domain *d,
>      return -ENOSYS;
>  }
>  
> -int p2m_pod_decrease_reservation(struct domain *d,
> -                                 xen_pfn_t gpfn,
> +int p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
>                                   unsigned int order)
>  {
>      return -ENOSYS;
> diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
> index 34f5239b6d..eb74e5c01f 100644
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -511,9 +511,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn);
>   * allow decrease_reservation() to handle everything else.
>   */
>  int
> -p2m_pod_decrease_reservation(struct domain *d,
> -                             xen_pfn_t gpfn,
> -                             unsigned int order)
> +p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
>  {
>      int ret = 0;
>      unsigned long i, n;
> @@ -521,7 +519,7 @@ p2m_pod_decrease_reservation(struct domain *d,
>      bool_t steal_for_cache;
>      long pod, nonpod, ram;
>  
> -    gfn_lock(p2m, gpfn, order);
> +    gfn_lock(p2m, gfn, order);
>      pod_lock(p2m);
>  
>      /*
> @@ -545,7 +543,7 @@ p2m_pod_decrease_reservation(struct domain *d,
>          p2m_type_t t;
>          unsigned int cur_order;
>  
> -        p2m->get_entry(p2m, gpfn + i, &t, &a, 0, &cur_order, NULL);
> +        p2m->get_entry(p2m, gfn_x(gfn) + i, &t, &a, 0, &cur_order, NULL);
>          n = 1UL << min(order, cur_order);
>          if ( t == p2m_populate_on_demand )
>              pod += n;
> @@ -567,7 +565,7 @@ p2m_pod_decrease_reservation(struct domain *d,
>           * All PoD: Mark the whole region invalid and tell caller
>           * we're done.
>           */
> -        p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
> +        p2m_set_entry(p2m, gfn_x(gfn), INVALID_MFN, order, p2m_invalid,
>                        p2m->default_access);
>          p2m->pod.entry_count -= 1UL << order;
>          BUG_ON(p2m->pod.entry_count < 0);
> @@ -584,7 +582,7 @@ p2m_pod_decrease_reservation(struct domain *d,
>       * - not all of the pages were RAM (now knowing order < SUPERPAGE_ORDER)
>       */
>      if ( steal_for_cache && order < SUPERPAGE_ORDER && ram == (1UL << order) &&
> -         p2m_pod_zero_check_superpage(p2m, gpfn & ~(SUPERPAGE_PAGES - 1)) )
> +         p2m_pod_zero_check_superpage(p2m, gfn_x(gfn) & ~(SUPERPAGE_PAGES - 1)) )
>      {
>          pod = 1UL << order;
>          ram = nonpod = 0;
> @@ -605,13 +603,13 @@ p2m_pod_decrease_reservation(struct domain *d,
>          p2m_access_t a;
>          unsigned int cur_order;
>  
> -        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, &cur_order, NULL);
> +        mfn = p2m->get_entry(p2m, gfn_x(gfn) + i, &t, &a, 0, &cur_order, NULL);
>          if ( order < cur_order )
>              cur_order = order;
>          n = 1UL << cur_order;
>          if ( t == p2m_populate_on_demand )
>          {
> -            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
> +            p2m_set_entry(p2m, gfn_x(gfn) + i, INVALID_MFN, cur_order,
>                            p2m_invalid, p2m->default_access);
>              p2m->pod.entry_count -= n;
>              BUG_ON(p2m->pod.entry_count < 0);
> @@ -633,7 +631,7 @@ p2m_pod_decrease_reservation(struct domain *d,
>  
>              page = mfn_to_page(mfn);
>  
> -            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
> +            p2m_set_entry(p2m, gfn_x(gfn) + i, INVALID_MFN, cur_order,
>                            p2m_invalid, p2m->default_access);
>              p2m_tlb_flush_sync(p2m);
>              for ( j = 0; j < n; ++j )
> @@ -663,7 +661,7 @@ out_entry_check:
>  
>  out_unlock:
>      pod_unlock(p2m);
> -    gfn_unlock(p2m, gpfn, order);
> +    gfn_unlock(p2m, gfn, order);
>      return ret;
>  }
>  
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index a2abf554e3..ad987e0f29 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -417,7 +417,8 @@ static void decrease_reservation(struct memop_args *a)
>  
>          /* See if populate-on-demand wants to handle this */
>          if ( is_hvm_domain(a->domain)
> -             && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
> +             && p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
> +                                             a->extent_order) )
>              continue;
>  
>          for ( j = 0; j < (1 << a->extent_order); j++ )
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index bc5bbf0db7..faadcfe8fe 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -266,19 +266,6 @@ static inline int guest_physmap_add_page(struct domain *d,
>  
>  mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
>  
> -/*
> - * Populate-on-demand
> - */
> -
> -/*
> - * Call when decreasing memory reservation to handle PoD entries properly.
> - * Will return '1' if all entries were handled and nothing more need be done.
> - */
> -int
> -p2m_pod_decrease_reservation(struct domain *d,
> -                             xen_pfn_t gpfn,
> -                             unsigned int order);
> -
>  /* Look up a GFN and take a reference count on the backing page. */
>  typedef unsigned int p2m_query_t;
>  #define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 10cdfc09a9..8f3409b400 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -643,13 +643,6 @@ int p2m_pod_empty_cache(struct domain *d);
>   * domain matches target */
>  int p2m_pod_set_mem_target(struct domain *d, unsigned long target);
>  
> -/* Call when decreasing memory reservation to handle PoD entries properly.
> - * Will return '1' if all entries were handled and nothing more need be done.*/
> -int
> -p2m_pod_decrease_reservation(struct domain *d,
> -                             xen_pfn_t gpfn,
> -                             unsigned int order);
> -
>  /* Scan pod cache when offline/broken page triggered */
>  int
>  p2m_pod_offline_or_broken_hit(struct page_info *p);
> diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
> index 2b5696cf33..27f89208f5 100644
> --- a/xen/include/xen/p2m-common.h
> +++ b/xen/include/xen/p2m-common.h
> @@ -20,4 +20,17 @@ int unmap_mmio_regions(struct domain *d,
>                         unsigned long nr,
>                         mfn_t mfn);
>  
> +/*
> + * Populate-on-Demand
> + */
> +
> +/*
> + * Call when decreasing memory reservation to handle PoD entries properly.
> + * Will return '1' if all entries were handled and nothing more need be done.
> + */
> +int
> +p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
> +                             unsigned int order);
> +
> +
>  #endif /* _XEN_P2M_COMMON_H */
> 
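One cosmetic remark, not a blocker for the ack: the open-coded
"gfn_x(gfn) + i" in the two loops could in principle go through a
typesafe helper; IIRC xen/include/xen/mm.h already has gfn_add() along
these lines:

  static inline gfn_t gfn_add(gfn_t gfn, unsigned long i)
  {
      return _gfn(gfn_x(gfn) + i);
  }

That only really pays off once p2m->get_entry() and p2m_set_entry()
take a gfn_t themselves, though; until then the unwrap at each call
site is unavoidable, so the patch reads fine as-is.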


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel