[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH] qemu-upstream: add discard support for xen_disk



On Tue, 28 Jan 2014, Olaf Hering wrote:
> Implement discard support for xen_disk. It makes use of the existing
> discard code in qemu.
> 
> The discard support is enabled unconditionally. The tool stack may provide a
> property "discard_enable" in the backend node to optionally disable discard
> support.  This is helpful in case the backing file was intentionally created
> non-sparse to avoid fragmentation.
> 
> Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

I think that the patch is fine, thank you.
Just one small comment below, though it is purely a matter of taste.


>  hw/block/xen_blkif.h | 12 ++++++++++++
>  hw/block/xen_disk.c  | 28 ++++++++++++++++++++++++++++
>  2 files changed, 40 insertions(+)
> 
> diff --git a/hw/block/xen_blkif.h b/hw/block/xen_blkif.h
> index c0f4136..711b692 100644
> --- a/hw/block/xen_blkif.h
> +++ b/hw/block/xen_blkif.h
> @@ -79,6 +79,12 @@ static inline void blkif_get_x86_32_req(blkif_request_t 
> *dst, blkif_x86_32_reque
>       dst->handle = src->handle;
>       dst->id = src->id;
>       dst->sector_number = src->sector_number;
> +     if (src->operation == BLKIF_OP_DISCARD) {
> +             struct blkif_request_discard *s = (void *)src;
> +             struct blkif_request_discard *d = (void *)dst;
> +             d->nr_sectors = s->nr_sectors;
> +             return;
> +     }
>       if (n > src->nr_segments)
>               n = src->nr_segments;
>       for (i = 0; i < n; i++)
> @@ -94,6 +100,12 @@ static inline void blkif_get_x86_64_req(blkif_request_t 
> *dst, blkif_x86_64_reque
>       dst->handle = src->handle;
>       dst->id = src->id;
>       dst->sector_number = src->sector_number;
> +     if (src->operation == BLKIF_OP_DISCARD) {
> +             struct blkif_request_discard *s = (void *)src;
> +             struct blkif_request_discard *d = (void *)dst;
> +             d->nr_sectors = s->nr_sectors;
> +             return;
> +     }
>       if (n > src->nr_segments)
>               n = src->nr_segments;
>       for (i = 0; i < n; i++)
> diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> index 03e30d7..539f2ed 100644
> --- a/hw/block/xen_disk.c
> +++ b/hw/block/xen_disk.c
> @@ -114,6 +114,7 @@ struct XenBlkDev {
>      int                 requests_finished;
>  
>      /* Persistent grants extension */
> +    gboolean            feature_discard;
>      gboolean            feature_persistent;
>      GTree               *persistent_gnts;
>      unsigned int        persistent_gnt_count;
> @@ -253,6 +254,8 @@ static int ioreq_parse(struct ioreq *ioreq)
>      case BLKIF_OP_WRITE:
>          ioreq->prot = PROT_READ; /* from memory */
>          break;
> +    case BLKIF_OP_DISCARD:
> +        return 0;
>      default:
>          xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
>                        ioreq->req.operation);
> @@ -490,6 +493,7 @@ static void qemu_aio_complete(void *opaque, int ret)
>  static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
>  {
>      struct XenBlkDev *blkdev = ioreq->blkdev;
> +    struct blkif_request_discard *discard_req = (void *)&ioreq->req;

Given that ioreq->req might not be a struct blkif_request_discard*, I
would rather make the assignment under the case BLKIF_OP_DISCARD below.


>      if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
>          goto err_no_map;
> @@ -521,6 +525,13 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
>                          &ioreq->v, ioreq->v.size / BLOCK_SIZE,
>                          qemu_aio_complete, ioreq);
>          break;
> +    case BLKIF_OP_DISCARD:
> +        bdrv_acct_start(blkdev->bs, &ioreq->acct, discard_req->nr_sectors * 
> BLOCK_SIZE, BDRV_ACCT_WRITE);
> +        ioreq->aio_inflight++;
> +        bdrv_aio_discard(blkdev->bs,
> +                        discard_req->sector_number, discard_req->nr_sectors,
> +                        qemu_aio_complete, ioreq);
> +        break;
>      default:
>          /* unknown operation (shouldn't happen -- parse catches this) */
>          goto err;
> @@ -699,6 +710,19 @@ static void blk_alloc(struct XenDevice *xendev)
>      }
>  }
>  
> +static void blk_parse_discard(struct XenBlkDev *blkdev)
> +{
> +    int enable;
> +
> +    blkdev->feature_discard = true;
> +
> +    if (xenstore_read_be_int(&blkdev->xendev, "discard_enable", &enable) == 
> 0)
> +         blkdev->feature_discard = !!enable;
> +
> +    if (blkdev->feature_discard)
> +         xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
> +}
> +
>  static int blk_init(struct XenDevice *xendev)
>  {
>      struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, 
> xendev);
> @@ -766,6 +790,8 @@ static int blk_init(struct XenDevice *xendev)
>      xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
>      xenstore_write_be_int(&blkdev->xendev, "info", info);
>  
> +    blk_parse_discard(blkdev);
> +
>      g_free(directiosafe);
>      return 0;
>  
> @@ -801,6 +827,8 @@ static int blk_connect(struct XenDevice *xendev)
>          qflags |= BDRV_O_RDWR;
>          readonly = false;
>      }
> +    if (blkdev->feature_discard)
> +        qflags |= BDRV_O_UNMAP;
>  
>      /* init qemu block driver */
>      index = (blkdev->xendev.dev - 202 * 256) / 16;
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.