Re: [Xen-devel] [PATCH v4 06/10] xen/blkback: separate ring information out of struct xen_blkif
On Mon, Nov 02, 2015 at 12:21:42PM +0800, Bob Liu wrote:
> Split per ring information to an new structure "xen_blkif_ring", so that
> one vbd device can associate with one or more rings/hardware queues.
s/can associate/can be associated/
>
> Introduce 'pers_gnts_lock' to protect the pool of persistent grants since we
> may have multi backend threads.
>
> This patch is a preparation for supporting multi hardware queues/rings.
>
> Signed-off-by: Arianna Avanzini <avanzini.arianna@xxxxxxxxx>
> Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
> ---
> drivers/block/xen-blkback/blkback.c | 233 ++++++++++++++++++++----------------
> drivers/block/xen-blkback/common.h | 64 ++++++----
> drivers/block/xen-blkback/xenbus.c | 107 ++++++++++-------
> 3 files changed, 234 insertions(+), 170 deletions(-)
>
> diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
> index 6a685ae..eaf7ec0 100644
> --- a/drivers/block/xen-blkback/blkback.c
> +++ b/drivers/block/xen-blkback/blkback.c
> @@ -173,11 +173,11 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
>
> #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
>
> -static int do_block_io_op(struct xen_blkif *blkif);
> -static int dispatch_rw_block_io(struct xen_blkif *blkif,
> +static int do_block_io_op(struct xen_blkif_ring *ring);
> +static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
> struct blkif_request *req,
> struct pending_req *pending_req);
> -static void make_response(struct xen_blkif *blkif, u64 id,
> +static void make_response(struct xen_blkif_ring *ring, u64 id,
> unsigned short op, int st);
>
> #define foreach_grant_safe(pos, n, rbtree, node) \
> @@ -189,14 +189,8 @@ static void make_response(struct xen_blkif *blkif, u64 id,
>
>
> /*
> - * We don't need locking around the persistent grant helpers
> - * because blkback uses a single-thread for each backed, so we
> - * can be sure that this functions will never be called recursively.
> - *
> - * The only exception to that is put_persistent_grant, that can be called
> - * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
> - * bit operations to modify the flags of a persistent grant and to count
> - * the number of used grants.
> + * pers_gnts_lock must be used around all the persistent grant helpers
> + * because blkback may use multi-thread/queue for each backend.
> */
Would it make sense to have an ASSERT on the holding of the
pers_gnts_lock in this function (and also in get_persistent_gnt,
put_persistent_gnt, and free_persistent_gnts)?
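Something along these lines is what I have in mind, as a minimal sketch
(the same one-liner would go at the top of the other helpers too);
lockdep_assert_held() compiles away without CONFIG_LOCKDEP, so the hot
path does not pay for it:

static int add_persistent_gnt(struct xen_blkif *blkif,
                              struct persistent_gnt *persistent_gnt)
{
        /* Complain (under lockdep) if a caller forgot to take the lock. */
        lockdep_assert_held(&blkif->pers_gnts_lock);

        /* ... rest of the function unchanged ... */
}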
> static int add_persistent_gnt(struct xen_blkif *blkif,
> struct persistent_gnt *persistent_gnt)
> @@ -322,11 +316,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
> int segs_to_unmap = 0;
> struct xen_blkif *blkif = container_of(work, typeof(*blkif),
> persistent_purge_work);
> struct gntab_unmap_queue_data unmap_data;
> + unsigned long flags;
>
> unmap_data.pages = pages;
> unmap_data.unmap_ops = unmap;
> unmap_data.kunmap_ops = NULL;
>
> + spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
> while(!list_empty(&blkif->persistent_purge_list)) {
> persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
> struct persistent_gnt,
> @@ -348,6 +344,7 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
> }
> kfree(persistent_gnt);
> }
> + spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
> if (segs_to_unmap > 0) {
> unmap_data.count = segs_to_unmap;
> BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
> @@ -362,16 +359,18 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
> unsigned int num_clean, total;
> bool scan_used = false, clean_used = false;
> struct rb_root *root;
> + unsigned long flags;
>
> + spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
> if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
> (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
> !blkif->vbd.overflow_max_grants)) {
> - return;
> + goto out;
> }
>
> if (work_busy(&blkif->persistent_purge_work)) {
Hm, looking at 'work_busy' it says that you should hold the pool mutex.
But that is a separate bug.
> pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
> - return;
> + goto out;
> }
>
> num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
> @@ -379,7 +378,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
> num_clean = min(blkif->persistent_gnt_c, num_clean);
> if ((num_clean == 0) ||
> (num_clean > (blkif->persistent_gnt_c -
> atomic_read(&blkif->persistent_gnt_in_use))))
> - return;
> + goto out;
>
> /*
> * At this point, we can assure that there will be no calls
> @@ -436,29 +435,35 @@ finished:
> }
>
> blkif->persistent_gnt_c -= (total - num_clean);
> + spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
> blkif->vbd.overflow_max_grants = 0;
>
> /* We can defer this work */
> schedule_work(&blkif->persistent_purge_work);
> pr_debug("Purged %u/%u\n", (total - num_clean), total);
> return;
> +
> +out:
> + spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
> +
> + return;
> }
>
> /*
> * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
> */
> -static struct pending_req *alloc_req(struct xen_blkif *blkif)
> +static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
> {
> struct pending_req *req = NULL;
> unsigned long flags;
>
> - spin_lock_irqsave(&blkif->pending_free_lock, flags);
> - if (!list_empty(&blkif->pending_free)) {
> - req = list_entry(blkif->pending_free.next, struct pending_req,
> + spin_lock_irqsave(&ring->pending_free_lock, flags);
> + if (!list_empty(&ring->pending_free)) {
> + req = list_entry(ring->pending_free.next, struct pending_req,
> free_list);
> list_del(&req->free_list);
> }
> - spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
> + spin_unlock_irqrestore(&ring->pending_free_lock, flags);
> return req;
> }
>
> @@ -466,17 +471,17 @@ static struct pending_req *alloc_req(struct xen_blkif *blkif)
> * Return the 'pending_req' structure back to the freepool. We also
> * wake up the thread if it was waiting for a free page.
> */
> -static void free_req(struct xen_blkif *blkif, struct pending_req *req)
> +static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
> {
> unsigned long flags;
> int was_empty;
>
> - spin_lock_irqsave(&blkif->pending_free_lock, flags);
> - was_empty = list_empty(&blkif->pending_free);
> - list_add(&req->free_list, &blkif->pending_free);
> - spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
> + spin_lock_irqsave(&ring->pending_free_lock, flags);
> + was_empty = list_empty(&ring->pending_free);
> + list_add(&req->free_list, &ring->pending_free);
> + spin_unlock_irqrestore(&ring->pending_free_lock, flags);
> if (was_empty)
> - wake_up(&blkif->pending_free_wq);
> + wake_up(&ring->pending_free_wq);
> }
>
> /*
> @@ -556,10 +561,10 @@ abort:
> /*
> * Notification from the guest OS.
> */
> -static void blkif_notify_work(struct xen_blkif *blkif)
> +static void blkif_notify_work(struct xen_blkif_ring *ring)
> {
> - blkif->waiting_reqs = 1;
> - wake_up(&blkif->wq);
> + ring->waiting_reqs = 1;
> + wake_up(&ring->wq);
> }
>
> irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
> @@ -590,7 +595,8 @@ static void print_stats(struct xen_blkif *blkif)
>
> int xen_blkif_schedule(void *arg)
> {
> - struct xen_blkif *blkif = arg;
> + struct xen_blkif_ring *ring = arg;
> + struct xen_blkif *blkif = ring->blkif;
> struct xen_vbd *vbd = &blkif->vbd;
> unsigned long timeout;
> int ret;
> @@ -606,27 +612,27 @@ int xen_blkif_schedule(void *arg)
> timeout = msecs_to_jiffies(LRU_INTERVAL);
>
> timeout = wait_event_interruptible_timeout(
> - blkif->wq,
> - blkif->waiting_reqs || kthread_should_stop(),
> + ring->wq,
> + ring->waiting_reqs || kthread_should_stop(),
> timeout);
> if (timeout == 0)
> goto purge_gnt_list;
> timeout = wait_event_interruptible_timeout(
> - blkif->pending_free_wq,
> - !list_empty(&blkif->pending_free) ||
> + ring->pending_free_wq,
> + !list_empty(&ring->pending_free) ||
> kthread_should_stop(),
> timeout);
> if (timeout == 0)
> goto purge_gnt_list;
>
> - blkif->waiting_reqs = 0;
> + ring->waiting_reqs = 0;
> smp_mb(); /* clear flag *before* checking for work */
>
> - ret = do_block_io_op(blkif);
> + ret = do_block_io_op(ring);
> if (ret > 0)
> - blkif->waiting_reqs = 1;
> + ring->waiting_reqs = 1;
> if (ret == -EACCES)
> - wait_event_interruptible(blkif->shutdown_wq,
> + wait_event_interruptible(ring->shutdown_wq,
> kthread_should_stop());
>
> purge_gnt_list:
> @@ -649,7 +655,7 @@ purge_gnt_list:
> if (log_stats)
> print_stats(blkif);
>
> - blkif->xenblkd = NULL;
> + ring->xenblkd = NULL;
> xen_blkif_put(blkif);
>
> return 0;
> @@ -658,32 +664,40 @@ purge_gnt_list:
> /*
> * Remove persistent grants and empty the pool of free pages
> */
> -void xen_blkbk_free_caches(struct xen_blkif *blkif)
> +void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
> {
> + struct xen_blkif *blkif = ring->blkif;
> + unsigned long flags;
> +
> /* Free all persistent grant pages */
> + spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
> if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
> free_persistent_gnts(blkif, &blkif->persistent_gnts,
> blkif->persistent_gnt_c);
>
> BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
> blkif->persistent_gnt_c = 0;
> + spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
>
> /* Since we are shutting down remove all pages from the buffer */
> shrink_free_pagepool(blkif, 0 /* All */);
> }
>
> static unsigned int xen_blkbk_unmap_prepare(
> - struct xen_blkif *blkif,
> + struct xen_blkif_ring *ring,
> struct grant_page **pages,
> unsigned int num,
> struct gnttab_unmap_grant_ref *unmap_ops,
> struct page **unmap_pages)
> {
> unsigned int i, invcount = 0;
> + unsigned long flags;
>
> for (i = 0; i < num; i++) {
> if (pages[i]->persistent_gnt != NULL) {
> - put_persistent_gnt(blkif, pages[i]->persistent_gnt);
> + spin_lock_irqsave(&ring->blkif->pers_gnts_lock, flags);
> + put_persistent_gnt(ring->blkif, pages[i]->persistent_gnt);
> + spin_unlock_irqrestore(&ring->blkif->pers_gnts_lock, flags);
> continue;
> }
> if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
> @@ -700,17 +714,18 @@ static unsigned int xen_blkbk_unmap_prepare(
>
> static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
> {
> - struct pending_req* pending_req = (struct pending_req*) (data->data);
> - struct xen_blkif *blkif = pending_req->blkif;
> + struct pending_req *pending_req = (struct pending_req *)(data->data);
> + struct xen_blkif_ring *ring = pending_req->ring;
> + struct xen_blkif *blkif = ring->blkif;
>
> /* BUG_ON used to reproduce existing behaviour,
> but is this the best way to deal with this? */
> BUG_ON(result);
>
> put_free_pages(blkif, data->pages, data->count);
> - make_response(blkif, pending_req->id,
> + make_response(ring, pending_req->id,
> pending_req->operation, pending_req->status);
> - free_req(blkif, pending_req);
> + free_req(ring, pending_req);
> /*
> * Make sure the request is freed before releasing blkif,
> * or there could be a race between free_req and the
> @@ -723,7 +738,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
> * pending_free_wq if there's a drain going on, but it has
> * to be taken into account if the current model is changed.
> */
> - if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
> + if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
> complete(&blkif->drain_complete);
> }
> xen_blkif_put(blkif);
> @@ -732,11 +747,11 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
> static void xen_blkbk_unmap_and_respond(struct pending_req *req)
> {
> struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
> - struct xen_blkif *blkif = req->blkif;
> + struct xen_blkif_ring *ring = req->ring;
> struct grant_page **pages = req->segments;
> unsigned int invcount;
>
> - invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_segs,
> + invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
> req->unmap, req->unmap_pages);
>
> work->data = req;
> @@ -757,7 +772,7 @@ static void xen_blkbk_unmap_and_respond(struct pending_req *req)
> * of hypercalls, but since this is only used in error paths there's
> * no real need.
> */
> -static void xen_blkbk_unmap(struct xen_blkif *blkif,
> +static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
> struct grant_page *pages[],
> int num)
> {
> @@ -768,20 +783,20 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
>
> while (num) {
> unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
> -
> - invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
> +
> + invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
> unmap, unmap_pages);
> if (invcount) {
> ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
> invcount);
> BUG_ON(ret);
> - put_free_pages(blkif, unmap_pages, invcount);
> + put_free_pages(ring->blkif, unmap_pages, invcount);
> }
> pages += batch;
> num -= batch;
> }
> }
>
> -static int xen_blkbk_map(struct xen_blkif *blkif,
> +static int xen_blkbk_map(struct xen_blkif_ring *ring,
> struct grant_page *pages[],
> int num, bool ro)
> {
> @@ -794,6 +809,8 @@ static int xen_blkbk_map(struct xen_blkif *blkif,
> int ret = 0;
> int last_map = 0, map_until = 0;
> int use_persistent_gnts;
> + struct xen_blkif *blkif = ring->blkif;
> + unsigned long irq_flags;
>
> use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
>
> @@ -806,10 +823,13 @@ again:
> for (i = map_until; i < num; i++) {
> uint32_t flags;
>
> - if (use_persistent_gnts)
> + if (use_persistent_gnts) {
> + spin_lock_irqsave(&blkif->pers_gnts_lock, irq_flags);
> persistent_gnt = get_persistent_gnt(
> blkif,
> pages[i]->gref);
> + spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
> + }
>
> if (persistent_gnt) {
> /*
> @@ -880,8 +900,10 @@ again:
> persistent_gnt->gnt = map[new_map_idx].ref;
> persistent_gnt->handle = map[new_map_idx].handle;
> persistent_gnt->page = pages[seg_idx]->page;
> + spin_lock_irqsave(&blkif->pers_gnts_lock, irq_flags);
> if (add_persistent_gnt(blkif,
> persistent_gnt)) {
> + spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
> kfree(persistent_gnt);
> persistent_gnt = NULL;
> goto next;
> @@ -890,6 +912,7 @@ again:
> pr_debug("grant %u added to the tree of persistent
> grants, using %u/%u\n",
> persistent_gnt->gnt, blkif->persistent_gnt_c,
> xen_blkif_max_pgrants);
> + spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
> goto next;
> }
> if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
> @@ -921,7 +944,7 @@ static int xen_blkbk_map_seg(struct pending_req *pending_req)
> {
> int rc;
>
> - rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
> + rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
> pending_req->nr_segs,
> (pending_req->operation != BLKIF_OP_READ));
>
> @@ -934,7 +957,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
> struct phys_req *preq)
> {
> struct grant_page **pages = pending_req->indirect_pages;
> - struct xen_blkif *blkif = pending_req->blkif;
> + struct xen_blkif_ring *ring = pending_req->ring;
> int indirect_grefs, rc, n, nseg, i;
> struct blkif_request_segment *segments = NULL;
>
> @@ -945,7 +968,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
> for (i = 0; i < indirect_grefs; i++)
> pages[i]->gref = req->u.indirect.indirect_grefs[i];
>
> - rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
> + rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
> if (rc)
> goto unmap;
>
> @@ -972,15 +995,16 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
> unmap:
> if (segments)
> kunmap_atomic(segments);
> - xen_blkbk_unmap(blkif, pages, indirect_grefs);
> + xen_blkbk_unmap(ring, pages, indirect_grefs);
> return rc;
> }
>
> -static int dispatch_discard_io(struct xen_blkif *blkif,
> +static int dispatch_discard_io(struct xen_blkif_ring *ring,
> struct blkif_request *req)
> {
> int err = 0;
> int status = BLKIF_RSP_OKAY;
> + struct xen_blkif *blkif = ring->blkif;
> struct block_device *bdev = blkif->vbd.bdev;
> unsigned long secure;
> struct phys_req preq;
> @@ -997,7 +1021,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
> preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
> goto fail_response;
> }
> - blkif->st_ds_req++;
> + ring->st_ds_req++;
>
> secure = (blkif->vbd.discard_secure &&
> (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
> @@ -1013,26 +1037,28 @@ fail_response:
> } else if (err)
> status = BLKIF_RSP_ERROR;
>
> - make_response(blkif, req->u.discard.id, req->operation, status);
> + make_response(ring, req->u.discard.id, req->operation, status);
> xen_blkif_put(blkif);
> return err;
> }
>
> -static int dispatch_other_io(struct xen_blkif *blkif,
> +static int dispatch_other_io(struct xen_blkif_ring *ring,
> struct blkif_request *req,
> struct pending_req *pending_req)
> {
> - free_req(blkif, pending_req);
> - make_response(blkif, req->u.other.id, req->operation,
> + free_req(ring, pending_req);
> + make_response(ring, req->u.other.id, req->operation,
> BLKIF_RSP_EOPNOTSUPP);
> return -EIO;
> }
>
> -static void xen_blk_drain_io(struct xen_blkif *blkif)
> +static void xen_blk_drain_io(struct xen_blkif_ring *ring)
> {
> + struct xen_blkif *blkif = ring->blkif;
> +
> atomic_set(&blkif->drain, 1);
> do {
> - if (atomic_read(&blkif->inflight) == 0)
> + if (atomic_read(&ring->inflight) == 0)
> break;
> wait_for_completion_interruptible_timeout(
> &blkif->drain_complete, HZ);
> @@ -1053,12 +1079,12 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
> if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
> (error == -EOPNOTSUPP)) {
> pr_debug("flush diskcache op failed, not supported\n");
> - xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
> + xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
> pending_req->status = BLKIF_RSP_EOPNOTSUPP;
> } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
> (error == -EOPNOTSUPP)) {
> pr_debug("write barrier op failed, not supported\n");
> - xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
> + xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
> pending_req->status = BLKIF_RSP_EOPNOTSUPP;
> } else if (error) {
> pr_debug("Buffer not up-to-date at end of operation,"
> @@ -1092,9 +1118,9 @@ static void end_block_io_op(struct bio *bio)
> * and transmute it to the block API to hand it over to the proper block disk.
> */
> static int
> -__do_block_io_op(struct xen_blkif *blkif)
> +__do_block_io_op(struct xen_blkif_ring *ring)
> {
> - union blkif_back_rings *blk_rings = &blkif->blk_rings;
> + union blkif_back_rings *blk_rings = &ring->blk_rings;
> struct blkif_request req;
> struct pending_req *pending_req;
> RING_IDX rc, rp;
> @@ -1107,7 +1133,7 @@ __do_block_io_op(struct xen_blkif *blkif)
> if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
> rc = blk_rings->common.rsp_prod_pvt;
> pr_warn("Frontend provided bogus ring requests (%d - %d = %d).
> Halting ring processing on dev=%04x\n",
> - rp, rc, rp - rc, blkif->vbd.pdevice);
> + rp, rc, rp - rc, ring->blkif->vbd.pdevice);
> return -EACCES;
> }
> while (rc != rp) {
> @@ -1120,14 +1146,14 @@ __do_block_io_op(struct xen_blkif *blkif)
> break;
> }
>
> - pending_req = alloc_req(blkif);
> + pending_req = alloc_req(ring);
> if (NULL == pending_req) {
> - blkif->st_oo_req++;
> + ring->st_oo_req++;
> more_to_do = 1;
> break;
> }
>
> - switch (blkif->blk_protocol) {
> + switch (ring->blkif->blk_protocol) {
> case BLKIF_PROTOCOL_NATIVE:
> memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
> sizeof(req));
> break;
> @@ -1151,16 +1177,16 @@ __do_block_io_op(struct xen_blkif *blkif)
> case BLKIF_OP_WRITE_BARRIER:
> case BLKIF_OP_FLUSH_DISKCACHE:
> case BLKIF_OP_INDIRECT:
> - if (dispatch_rw_block_io(blkif, &req, pending_req))
> + if (dispatch_rw_block_io(ring, &req, pending_req))
> goto done;
> break;
> case BLKIF_OP_DISCARD:
> - free_req(blkif, pending_req);
> - if (dispatch_discard_io(blkif, &req))
> + free_req(ring, pending_req);
> + if (dispatch_discard_io(ring, &req))
> goto done;
> break;
> default:
> - if (dispatch_other_io(blkif, &req, pending_req))
> + if (dispatch_other_io(ring, &req, pending_req))
> goto done;
> break;
> }
> @@ -1173,13 +1199,13 @@ done:
> }
>
> static int
> -do_block_io_op(struct xen_blkif *blkif)
> +do_block_io_op(struct xen_blkif_ring *ring)
> {
> - union blkif_back_rings *blk_rings = &blkif->blk_rings;
> + union blkif_back_rings *blk_rings = &ring->blk_rings;
> int more_to_do;
>
> do {
> - more_to_do = __do_block_io_op(blkif);
> + more_to_do = __do_block_io_op(ring);
> if (more_to_do)
> break;
>
> @@ -1192,7 +1218,7 @@ do_block_io_op(struct xen_blkif *blkif)
> * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
> * and call the 'submit_bio' to pass it to the underlying storage.
> */
> -static int dispatch_rw_block_io(struct xen_blkif *blkif,
> +static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
> struct blkif_request *req,
> struct pending_req *pending_req)
> {
> @@ -1219,17 +1245,17 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
>
> switch (req_operation) {
> case BLKIF_OP_READ:
> - blkif->st_rd_req++;
> + ring->st_rd_req++;
> operation = READ;
> break;
> case BLKIF_OP_WRITE:
> - blkif->st_wr_req++;
> + ring->st_wr_req++;
> operation = WRITE_ODIRECT;
> break;
> case BLKIF_OP_WRITE_BARRIER:
> drain = true;
> case BLKIF_OP_FLUSH_DISKCACHE:
> - blkif->st_f_req++;
> + ring->st_f_req++;
> operation = WRITE_FLUSH;
> break;
> default:
> @@ -1254,7 +1280,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
>
> preq.nr_sects = 0;
>
> - pending_req->blkif = blkif;
> + pending_req->ring = ring;
> pending_req->id = req->u.rw.id;
> pending_req->operation = req_operation;
> pending_req->status = BLKIF_RSP_OKAY;
> @@ -1281,12 +1307,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
> goto fail_response;
> }
>
> - if (xen_vbd_translate(&preq, blkif, operation) != 0) {
> + if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
> pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
> operation == READ ? "read" : "write",
> preq.sector_number,
> preq.sector_number + preq.nr_sects,
> - blkif->vbd.pdevice);
> + ring->blkif->vbd.pdevice);
> goto fail_response;
> }
>
> @@ -1298,7 +1324,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
> if (((int)preq.sector_number|(int)seg[i].nsec) &
> ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
> pr_debug("Misaligned I/O request from domain %d\n",
> - blkif->domid);
> + ring->blkif->domid);
> goto fail_response;
> }
> }
> @@ -1307,7 +1333,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
> * issue the WRITE_FLUSH.
> */
> if (drain)
> - xen_blk_drain_io(pending_req->blkif);
> + xen_blk_drain_io(pending_req->ring);
>
> /*
> * If we have failed at this point, we need to undo the M2P override,
> @@ -1322,8 +1348,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
> * This corresponding xen_blkif_put is done in __end_block_io_op, or
> * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
> */
> - xen_blkif_get(blkif);
> - atomic_inc(&blkif->inflight);
> + xen_blkif_get(ring->blkif);
> + atomic_inc(&ring->inflight);
>
> for (i = 0; i < nseg; i++) {
> while ((bio == NULL) ||
> @@ -1371,19 +1397,19 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
> blk_finish_plug(&plug);
>
> if (operation == READ)
> - blkif->st_rd_sect += preq.nr_sects;
> + ring->st_rd_sect += preq.nr_sects;
> else if (operation & WRITE)
> - blkif->st_wr_sect += preq.nr_sects;
> + ring->st_wr_sect += preq.nr_sects;
>
> return 0;
>
> fail_flush:
> - xen_blkbk_unmap(blkif, pending_req->segments,
> + xen_blkbk_unmap(ring, pending_req->segments,
> pending_req->nr_segs);
> fail_response:
> /* Haven't submitted any bio's yet. */
> - make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
> - free_req(blkif, pending_req);
> + make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
> + free_req(ring, pending_req);
> msleep(1); /* back off a bit */
> return -EIO;
>
> @@ -1401,21 +1427,22 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
> /*
> * Put a response on the ring on how the operation fared.
> */
> -static void make_response(struct xen_blkif *blkif, u64 id,
> +static void make_response(struct xen_blkif_ring *ring, u64 id,
> unsigned short op, int st)
> {
> struct blkif_response resp;
> unsigned long flags;
> - union blkif_back_rings *blk_rings = &blkif->blk_rings;
> + union blkif_back_rings *blk_rings;
> int notify;
>
> resp.id = id;
> resp.operation = op;
> resp.status = st;
>
> - spin_lock_irqsave(&blkif->blk_ring_lock, flags);
> + spin_lock_irqsave(&ring->blk_ring_lock, flags);
> + blk_rings = &ring->blk_rings;
> /* Place on the response ring for the relevant domain. */
> - switch (blkif->blk_protocol) {
> + switch (ring->blkif->blk_protocol) {
> case BLKIF_PROTOCOL_NATIVE:
> memcpy(RING_GET_RESPONSE(&blk_rings->native,
> blk_rings->native.rsp_prod_pvt),
> &resp, sizeof(resp));
> @@ -1433,9 +1460,9 @@ static void make_response(struct xen_blkif *blkif, u64 id,
> }
> blk_rings->common.rsp_prod_pvt++;
> RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
> - spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
> + spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
> if (notify)
> - notify_remote_via_irq(blkif->irq);
> + notify_remote_via_irq(ring->irq);
> }
>
> static int __init xen_blkif_init(void)
> diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
> index 45a044a..f0dd69a 100644
> --- a/drivers/block/xen-blkback/common.h
> +++ b/drivers/block/xen-blkback/common.h
> @@ -260,34 +260,60 @@ struct persistent_gnt {
> struct list_head remove_node;
> };
>
> +/* Per-ring information */
Missing full stop.
> +struct xen_blkif_ring {
> + /* Physical parameters of the comms window. */
> + unsigned int irq;
> + union blkif_back_rings blk_rings;
> + void *blk_ring;
> + /* Private fields. */
> + spinlock_t blk_ring_lock;
> +
> + wait_queue_head_t wq;
> + atomic_t inflight;
> + /* One thread per blkif ring. */
> + struct task_struct *xenblkd;
> + unsigned int waiting_reqs;
> +
> + /* List of all 'pending_req' available */
> + struct list_head pending_free;
> + /* And its spinlock. */
> + spinlock_t pending_free_lock;
> + wait_queue_head_t pending_free_wq;
> +
> + /* statistics */
> + unsigned long st_print;
This one is on the same line as the other.
> + unsigned long long st_rd_req;
But these are not?
Also you didn't pull these out of 'struct xen_blkif'?
Or is that meant to be done in another patch?
> + unsigned long long st_wr_req;
> + unsigned long long st_oo_req;
> + unsigned long long st_f_req;
> + unsigned long long st_ds_req;
> + unsigned long long st_rd_sect;
> + unsigned long long st_wr_sect;
> +
> + struct work_struct free_work;
> + /* Thread shutdown wait queue. */
> + wait_queue_head_t shutdown_wq;
> + struct xen_blkif *blkif;
> +};
> +
> struct xen_blkif {
> /* Unique identifier for this interface. */
> domid_t domid;
> unsigned int handle;
> - /* Physical parameters of the comms window. */
> - unsigned int irq;
> /* Comms information. */
> enum blkif_protocol blk_protocol;
> - union blkif_back_rings blk_rings;
> - void *blk_ring;
> /* The VBD attached to this interface. */
> struct xen_vbd vbd;
> /* Back pointer to the backend_info. */
> struct backend_info *be;
> - /* Private fields. */
> - spinlock_t blk_ring_lock;
> atomic_t refcnt;
> -
> - wait_queue_head_t wq;
> /* for barrier (drain) requests */
> struct completion drain_complete;
> atomic_t drain;
> - atomic_t inflight;
> - /* One thread per one blkif. */
> - struct task_struct *xenblkd;
> - unsigned int waiting_reqs;
>
> /* tree to store persistent grants */
> + spinlock_t pers_gnts_lock;
> struct rb_root persistent_gnts;
> unsigned int persistent_gnt_c;
> atomic_t persistent_gnt_in_use;
> @@ -302,12 +328,6 @@ struct xen_blkif {
> int free_pages_num;
> struct list_head free_pages;
>
> - /* List of all 'pending_req' available */
> - struct list_head pending_free;
> - /* And its spinlock. */
> - spinlock_t pending_free_lock;
> - wait_queue_head_t pending_free_wq;
> -
> /* statistics */
> unsigned long st_print;
> unsigned long long st_rd_req;
> @@ -319,9 +339,9 @@ struct xen_blkif {
> unsigned long long st_wr_sect;
>
> struct work_struct free_work;
> - /* Thread shutdown wait queue. */
> - wait_queue_head_t shutdown_wq;
> unsigned int nr_ring_pages;
> + /* All rings for this device */
Missing full stop.
> + struct xen_blkif_ring ring;
> };
>
> struct seg_buf {
> @@ -343,7 +363,7 @@ struct grant_page {
> * response queued for it, with the saved 'id' passed back.
> */
> struct pending_req {
> - struct xen_blkif *blkif;
> + struct xen_blkif_ring *ring;
> u64 id;
> int nr_segs;
> atomic_t pendcnt;
> @@ -385,7 +405,7 @@ int xen_blkif_xenbus_init(void);
> irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
> int xen_blkif_schedule(void *arg);
> int xen_blkif_purge_persistent(void *arg);
> -void xen_blkbk_free_caches(struct xen_blkif *blkif);
> +void xen_blkbk_free_caches(struct xen_blkif_ring *ring);
>
> int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
> struct backend_info *be, int state);
> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
> index 7676575..7bdd5fd 100644
> --- a/drivers/block/xen-blkback/xenbus.c
> +++ b/drivers/block/xen-blkback/xenbus.c
> @@ -88,7 +88,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
> char name[BLKBACK_NAME_LEN];
>
> /* Not ready to connect? */
> - if (!blkif->irq || !blkif->vbd.bdev)
> + if (!blkif->ring.irq || !blkif->vbd.bdev)
> return;
>
> /* Already connected? */
> @@ -113,10 +113,10 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
> }
> invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
>
> - blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, "%s", name);
> - if (IS_ERR(blkif->xenblkd)) {
> - err = PTR_ERR(blkif->xenblkd);
> - blkif->xenblkd = NULL;
> + blkif->ring.xenblkd = kthread_run(xen_blkif_schedule, &blkif->ring, "%s", name);
> + if (IS_ERR(blkif->ring.xenblkd)) {
> + err = PTR_ERR(blkif->ring.xenblkd);
> + blkif->ring.xenblkd = NULL;
> xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
> return;
> }
> @@ -125,6 +125,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
> static struct xen_blkif *xen_blkif_alloc(domid_t domid)
> {
> struct xen_blkif *blkif;
> + struct xen_blkif_ring *ring;
>
> BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
>
> @@ -133,41 +134,45 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
> return ERR_PTR(-ENOMEM);
>
> blkif->domid = domid;
> - spin_lock_init(&blkif->blk_ring_lock);
> atomic_set(&blkif->refcnt, 1);
> - init_waitqueue_head(&blkif->wq);
> init_completion(&blkif->drain_complete);
> atomic_set(&blkif->drain, 0);
> - blkif->st_print = jiffies;
> - blkif->persistent_gnts.rb_node = NULL;
> + INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
> spin_lock_init(&blkif->free_pages_lock);
> INIT_LIST_HEAD(&blkif->free_pages);
> - INIT_LIST_HEAD(&blkif->persistent_purge_list);
> blkif->free_pages_num = 0;
> + blkif->persistent_gnts.rb_node = NULL;
> + INIT_LIST_HEAD(&blkif->persistent_purge_list);
> atomic_set(&blkif->persistent_gnt_in_use, 0);
> - atomic_set(&blkif->inflight, 0);
> INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
>
> - INIT_LIST_HEAD(&blkif->pending_free);
> - INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
> - spin_lock_init(&blkif->pending_free_lock);
> - init_waitqueue_head(&blkif->pending_free_wq);
> - init_waitqueue_head(&blkif->shutdown_wq);
> + ring = &blkif->ring;
> + ring->blkif = blkif;
> + spin_lock_init(&ring->blk_ring_lock);
> + init_waitqueue_head(&ring->wq);
> + ring->st_print = jiffies;
> + atomic_set(&ring->inflight, 0);
> +
> + INIT_LIST_HEAD(&ring->pending_free);
> + spin_lock_init(&ring->pending_free_lock);
> + init_waitqueue_head(&ring->pending_free_wq);
> + init_waitqueue_head(&ring->shutdown_wq);
>
> return blkif;
> }
>
> -static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
> +static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
> unsigned int nr_grefs, unsigned int evtchn)
> {
> int err;
> + struct xen_blkif *blkif = ring->blkif;
>
> /* Already connected through? */
> - if (blkif->irq)
> + if (ring->irq)
> return 0;
>
> err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
> - &blkif->blk_ring);
> + &ring->blk_ring);
> if (err < 0)
> return err;
>
> @@ -175,22 +180,22 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
> case BLKIF_PROTOCOL_NATIVE:
> {
> struct blkif_sring *sring;
> - sring = (struct blkif_sring *)blkif->blk_ring;
> - BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE * nr_grefs);
> + sring = (struct blkif_sring *)ring->blk_ring;
> + BACK_RING_INIT(&ring->blk_rings.native, sring, PAGE_SIZE * nr_grefs);
> break;
> }
> case BLKIF_PROTOCOL_X86_32:
> {
> struct blkif_x86_32_sring *sring_x86_32;
> - sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring;
> - BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE * nr_grefs);
> + sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring;
> + BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32, PAGE_SIZE * nr_grefs);
> break;
> }
> case BLKIF_PROTOCOL_X86_64:
> {
> struct blkif_x86_64_sring *sring_x86_64;
> - sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring;
> - BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE * nr_grefs);
> + sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring;
> + BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64, PAGE_SIZE * nr_grefs);
> break;
> }
> default:
> @@ -199,13 +204,13 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
>
> err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
> xen_blkif_be_int, 0,
> - "blkif-backend", blkif);
> + "blkif-backend", ring);
> if (err < 0) {
> - xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
> - blkif->blk_rings.common.sring = NULL;
> + xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
> + ring->blk_rings.common.sring = NULL;
> return err;
> }
> - blkif->irq = err;
> + ring->irq = err;
>
> return 0;
> }
> @@ -214,35 +219,36 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
> {
> struct pending_req *req, *n;
> int i = 0, j;
> + struct xen_blkif_ring *ring = &blkif->ring;
>
> - if (blkif->xenblkd) {
> - kthread_stop(blkif->xenblkd);
> - wake_up(&blkif->shutdown_wq);
> - blkif->xenblkd = NULL;
> + if (ring->xenblkd) {
> + kthread_stop(ring->xenblkd);
> + wake_up(&ring->shutdown_wq);
> + ring->xenblkd = NULL;
> }
>
> /* The above kthread_stop() guarantees that at this point we
> * don't have any discard_io or other_io requests. So, checking
> * for inflight IO is enough.
> */
> - if (atomic_read(&blkif->inflight) > 0)
> + if (atomic_read(&ring->inflight) > 0)
> return -EBUSY;
>
> - if (blkif->irq) {
> - unbind_from_irqhandler(blkif->irq, blkif);
> - blkif->irq = 0;
> + if (ring->irq) {
> + unbind_from_irqhandler(ring->irq, ring);
> + ring->irq = 0;
> }
>
> - if (blkif->blk_rings.common.sring) {
> - xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
> - blkif->blk_rings.common.sring = NULL;
> + if (ring->blk_rings.common.sring) {
> + xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
> + ring->blk_rings.common.sring = NULL;
> }
>
> /* Remove all persistent grants and the cache of ballooned pages. */
> - xen_blkbk_free_caches(blkif);
> + xen_blkbk_free_caches(ring);
>
> /* Check that there is no request in use */
> - list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
> + list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
> list_del(&req->free_list);
>
> for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
> @@ -300,6 +306,16 @@ int __init xen_blkif_interface_init(void)
> { \
> struct xenbus_device *dev = to_xenbus_device(_dev); \
> struct backend_info *be = dev_get_drvdata(&dev->dev); \
> + struct xen_blkif *blkif = be->blkif; \
> + struct xen_blkif_ring *ring = &blkif->ring; \
> + \
> + blkif->st_oo_req = ring->st_oo_req; \
> + blkif->st_rd_req = ring->st_rd_req; \
> + blkif->st_wr_req = ring->st_wr_req; \
> + blkif->st_f_req = ring->st_f_req; \
> + blkif->st_ds_req = ring->st_ds_req; \
> + blkif->st_rd_sect = ring->st_rd_sect; \
> + blkif->st_wr_sect = ring->st_wr_sect; \
If I read 'oo_req' I end up populating 'blkif->st_oo_req',
'blkif->st_rd_req', 'blkif->st_wr_req', and so on, every time.
Please rework this.
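One possible rework, just as a sketch (VBD_SHOW_RING is an invented
name): let each attribute read the single per-ring counter it reports,
instead of copying all of the statistics on every sysfs read:

#define VBD_SHOW_RING(name, format)                                     \
        static ssize_t show_##name(struct device *_dev,                 \
                                   struct device_attribute *attr,       \
                                   char *buf)                           \
        {                                                               \
                struct xenbus_device *dev = to_xenbus_device(_dev);     \
                struct backend_info *be = dev_get_drvdata(&dev->dev);   \
                                                                        \
                /* Read only the field this attribute exposes. */       \
                return sprintf(buf, format, be->blkif->ring.st_##name); \
        }                                                               \
        static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

VBD_SHOW_RING(oo_req, "%llu\n");
VBD_SHOW_RING(rd_req, "%llu\n");
/* ... and so on for wr_req, f_req, ds_req, rd_sect and wr_sect. */

That also becomes the natural shape once there is more than one ring per
device and the attribute has to aggregate over all of them.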
> \
> return sprintf(buf, format, ##args); \
> } \
> @@ -832,6 +848,7 @@ static int connect_ring(struct backend_info *be)
> char protocol[64] = "";
> struct pending_req *req, *n;
> int err, i, j;
> + struct xen_blkif_ring *ring = &be->blkif->ring;
>
> pr_debug("%s %s\n", __func__, dev->otherend);
>
> @@ -920,7 +937,7 @@ static int connect_ring(struct backend_info *be)
> req = kzalloc(sizeof(*req), GFP_KERNEL);
> if (!req)
> goto fail;
> - list_add_tail(&req->free_list, &be->blkif->pending_free);
> + list_add_tail(&req->free_list, &ring->pending_free);
> for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
> req->segments[j] = kzalloc(sizeof(*req->segments[0]),
> GFP_KERNEL);
> if (!req->segments[j])
> @@ -935,7 +952,7 @@ static int connect_ring(struct backend_info *be)
> }
>
> /* Map the shared frame, irq etc. */
> - err = xen_blkif_map(be->blkif, ring_ref, nr_grefs, evtchn);
> + err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
> if (err) {
> xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
> return err;
> @@ -944,7 +961,7 @@ static int connect_ring(struct backend_info *be)
> return 0;
>
> fail:
> - list_for_each_entry_safe(req, n, &be->blkif->pending_free, free_list) {
> + list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
> list_del(&req->free_list);
> for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
> if (!req->segments[j])
> --
> 1.8.3.1
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel