
Re: [Xen-devel] [PATCHv4 10/14] xen/gntdev: convert priv->lock to a mutex



On Mon, 26 Jan 2015, David Vrabel wrote:
> Unmapping may require sleeping and we unmap while holding priv->lock, so
> convert it to a mutex.
> 
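
The rationale is worth spelling out for readers following the series. Roughly, the pattern being applied is (an illustrative sketch only; example_priv and example_remove_map are made-up names, not gntdev code): a spinlock holder must not sleep, so once the unmap work done inside the critical section can block, the lock protecting it has to become a sleeping lock such as a mutex.

#include <linux/list.h>
#include <linux/mutex.h>

/* Illustrative only: stands in for struct gntdev_priv. */
struct example_priv {
        struct mutex lock;              /* was: spinlock_t lock */
        struct list_head maps;          /* protected by lock */
};

static void example_remove_map(struct example_priv *priv,
                               struct list_head *entry)
{
        mutex_lock(&priv->lock);        /* may sleep, unlike spin_lock() */
        list_del(entry);
        /* ... unmap work that may itself block can safely go here ... */
        mutex_unlock(&priv->lock);
}
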
> Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
> ---
>  drivers/xen/gntdev.c |   40 ++++++++++++++++++++--------------------
>  1 file changed, 20 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
> index a28807a..c1a03fa 100644
> --- a/drivers/xen/gntdev.c
> +++ b/drivers/xen/gntdev.c
> @@ -67,7 +67,7 @@ struct gntdev_priv {
>        * Only populated if populate_freeable_maps == 1 */
>       struct list_head freeable_maps;
>       /* lock protects maps and freeable_maps */
> -     spinlock_t lock;
> +     struct mutex lock;
>       struct mm_struct *mm;
>       struct mmu_notifier mn;
>  };
> @@ -221,9 +221,9 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
>       }
>  
>       if (populate_freeable_maps && priv) {
> -             spin_lock(&priv->lock);
> +             mutex_lock(&priv->lock);
>               list_del(&map->next);
> -             spin_unlock(&priv->lock);
> +             mutex_unlock(&priv->lock);
>       }
>  
>       if (map->pages && !use_ptemod)
> @@ -397,9 +397,9 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
>                * not do any unmapping, since that has been done prior to
>                * closing the vma, but it may still iterate the unmap_ops list.
>                */
> -             spin_lock(&priv->lock);
> +             mutex_lock(&priv->lock);
>               map->vma = NULL;
> -             spin_unlock(&priv->lock);
> +             mutex_unlock(&priv->lock);
>       }
>       vma->vm_private_data = NULL;
>       gntdev_put_map(priv, map);
> @@ -443,14 +443,14 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
>       struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
>       struct grant_map *map;
>  
> -     spin_lock(&priv->lock);
> +     mutex_lock(&priv->lock);
>       list_for_each_entry(map, &priv->maps, next) {
>               unmap_if_in_range(map, start, end);
>       }
>       list_for_each_entry(map, &priv->freeable_maps, next) {
>               unmap_if_in_range(map, start, end);
>       }
> -     spin_unlock(&priv->lock);
> +     mutex_unlock(&priv->lock);
>  }

I don't think that mmu_notifier callbacks are allowed to sleep, so taking
priv->lock as a mutex in mn_invl_range_start() (and the other notifier
callbacks converted below) doesn't look safe to me:

https://lkml.org/lkml/2010/1/25/187
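
To make the concern concrete, an illustration only (example_invl_range_start is a made-up name, not proposed code): if the notifier can ever be invoked in atomic context, the new mutex_lock() is a sleeping call from an invalid context. mutex_lock() contains a might_sleep() check, so a CONFIG_DEBUG_ATOMIC_SLEEP build should flag that case with a "BUG: sleeping function called from invalid context" splat; on any kernel the sleep itself would be a bug.

static void example_invl_range_start(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);

        /*
         * mutex_lock() may block and calls might_sleep(); if this
         * callback were ever reached with preemption or interrupts
         * disabled, that would be a sleeping call in atomic context.
         */
        mutex_lock(&priv->lock);
        /* ... walk priv->maps / priv->freeable_maps and unmap ... */
        mutex_unlock(&priv->lock);
}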


>  static void mn_invl_page(struct mmu_notifier *mn,
> @@ -467,7 +467,7 @@ static void mn_release(struct mmu_notifier *mn,
>       struct grant_map *map;
>       int err;
>  
> -     spin_lock(&priv->lock);
> +     mutex_lock(&priv->lock);
>       list_for_each_entry(map, &priv->maps, next) {
>               if (!map->vma)
>                       continue;
> @@ -486,7 +486,7 @@ static void mn_release(struct mmu_notifier *mn,
>               err = unmap_grant_pages(map, /* offset */ 0, map->count);
>               WARN_ON(err);
>       }
> -     spin_unlock(&priv->lock);
> +     mutex_unlock(&priv->lock);
>  }
>  
>  static struct mmu_notifier_ops gntdev_mmu_ops = {
> @@ -508,7 +508,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
>  
>       INIT_LIST_HEAD(&priv->maps);
>       INIT_LIST_HEAD(&priv->freeable_maps);
> -     spin_lock_init(&priv->lock);
> +     mutex_init(&priv->lock);
>  
>       if (use_ptemod) {
>               priv->mm = get_task_mm(current);
> @@ -582,10 +582,10 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
>               return -EFAULT;
>       }
>  
> -     spin_lock(&priv->lock);
> +     mutex_lock(&priv->lock);
>       gntdev_add_map(priv, map);
>       op.index = map->index << PAGE_SHIFT;
> -     spin_unlock(&priv->lock);
> +     mutex_unlock(&priv->lock);
>  
>       if (copy_to_user(u, &op, sizeof(op)) != 0)
>               return -EFAULT;
> @@ -604,7 +604,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
>               return -EFAULT;
>       pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
>  
> -     spin_lock(&priv->lock);
> +     mutex_lock(&priv->lock);
>       map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
>       if (map) {
>               list_del(&map->next);
> @@ -612,7 +612,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
>                       list_add_tail(&map->next, &priv->freeable_maps);
>               err = 0;
>       }
> -     spin_unlock(&priv->lock);
> +     mutex_unlock(&priv->lock);
>       if (map)
>               gntdev_put_map(priv, map);
>       return err;
> @@ -680,7 +680,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
>       out_flags = op.action;
>       out_event = op.event_channel_port;
>  
> -     spin_lock(&priv->lock);
> +     mutex_lock(&priv->lock);
>  
>       list_for_each_entry(map, &priv->maps, next) {
>               uint64_t begin = map->index << PAGE_SHIFT;
> @@ -708,7 +708,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
>       rc = 0;
>  
>   unlock_out:
> -     spin_unlock(&priv->lock);
> +     mutex_unlock(&priv->lock);
>  
>       /* Drop the reference to the event channel we did not save in the map */
>       if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
> @@ -758,7 +758,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
>       pr_debug("map %d+%d at %lx (pgoff %lx)\n",
>                       index, count, vma->vm_start, vma->vm_pgoff);
>  
> -     spin_lock(&priv->lock);
> +     mutex_lock(&priv->lock);
>       map = gntdev_find_map_index(priv, index, count);
>       if (!map)
>               goto unlock_out;
> @@ -793,7 +793,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
>                       map->flags |= GNTMAP_readonly;
>       }
>  
> -     spin_unlock(&priv->lock);
> +     mutex_unlock(&priv->lock);
>  
>       if (use_ptemod) {
>               err = apply_to_page_range(vma->vm_mm, vma->vm_start,
> @@ -821,11 +821,11 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
>       return 0;
>  
>  unlock_out:
> -     spin_unlock(&priv->lock);
> +     mutex_unlock(&priv->lock);
>       return err;
>  
>  out_unlock_put:
> -     spin_unlock(&priv->lock);
> +     mutex_unlock(&priv->lock);
>  out_put_map:
>       if (use_ptemod)
>               map->vma = NULL;
> -- 
> 1.7.10.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

