
[Xen-changelog] [linux-2.6.18-xen] blkback: streamline main processing loop



# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1332940883 -7200
# Node ID 80a3bc79579e55efaece795656a0c08d854f8280
# Parent  56e7b5175d61e16d4da6ad89c2929513da479f6c
blkback: streamline main processing loop

- move stats updates into dispatch_rw_block_io(), allowing common
  cases to be folded
- don't allocate a pending_req_t instance when none is going to be
  needed (particularly relevant since they are a global resource);
  both points are illustrated in the sketch following the diff
- use type-safe assignment rather than memcpy() for obtaining native
  requests from the ring (see the sketch just after the sign-off)

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
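
On the memcpy() -> assignment point: plain struct assignment is
checked by the compiler, while memcpy() accepts any pointer/size
combination. A minimal standalone sketch of the difference (the
struct and function names here are made up for illustration, not
taken from the driver):

    #include <string.h>

    struct request {
        unsigned long id;
        int operation;
    };

    /* Compiles even if the types of dst/src or the size argument
     * silently drift apart during later refactoring. */
    static void copy_with_memcpy(struct request *dst,
                                 const struct request *src)
    {
        memcpy(dst, src, sizeof(*dst));
    }

    /* Type-safe: the compiler rejects mismatched pointer types,
     * and the copied size is always exactly right. */
    static void copy_with_assignment(struct request *dst,
                                     const struct request *src)
    {
        *dst = *src;
    }

    int main(void)
    {
        struct request a = { 1, 2 }, b, c;

        copy_with_memcpy(&b, &a);
        copy_with_assignment(&c, &a);
        return (b.id == c.id && b.operation == c.operation) ? 0 : 1;
    }

RING_GET_REQUEST() returns a typed pointer, which is what makes the
assignment form in the hunk below possible.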


diff -r 56e7b5175d61 -r 80a3bc79579e drivers/xen/blkback/blkback.c
--- a/drivers/xen/blkback/blkback.c     Wed Mar 28 15:07:51 2012 +0200
+++ b/drivers/xen/blkback/blkback.c     Wed Mar 28 15:21:23 2012 +0200
@@ -315,32 +315,21 @@ static int _do_block_io_op(blkif_t *blki
        blkif_request_t req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
-       int more_to_do = 0;
 
        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-       while ((rc != rp)) {
-
+       while (rc != rp) {
                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;
 
-               if (kthread_should_stop()) {
-                       more_to_do = 1;
-                       break;
-               }
-
-               pending_req = alloc_req();
-               if (NULL == pending_req) {
-                       blkif->st_oo_req++;
-                       more_to_do = 1;
-                       break;
-               }
+               if (kthread_should_stop())
+                       return 1;
 
                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
-                       memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
+                       req = *RING_GET_REQUEST(&blk_rings->native, rc);
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
@@ -350,33 +339,39 @@ static int _do_block_io_op(blkif_t *blki
                        break;
                default:
                        BUG();
+                       return 0; /* make compiler happy */
                }
-               blk_rings->common.req_cons = ++rc; /* before make_response() */
 
-               /* Apply all sanity checks to /private copy/ of request. */
-               barrier();
+               ++rc;
 
                switch (req.operation) {
                case BLKIF_OP_READ:
-                       blkif->st_rd_req++;
-                       dispatch_rw_block_io(blkif, &req, pending_req);
-                       break;
+               case BLKIF_OP_WRITE:
                case BLKIF_OP_WRITE_BARRIER:
-                       blkif->st_br_req++;
-                       /* fall through */
-               case BLKIF_OP_WRITE:
-                       blkif->st_wr_req++;
+                       pending_req = alloc_req();
+                       if (!pending_req) {
+                               blkif->st_oo_req++;
+                               return 1;
+                       }
+
+                       /* before make_response() */
+                       blk_rings->common.req_cons = rc;
+
+                       /* Apply all sanity checks to /private copy/ of request. */
+                       barrier();
+
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                default:
                        /* A good sign something is wrong: sleep for a while to
                         * avoid excessive CPU consumption by a bad guest. */
                        msleep(1);
+                       blk_rings->common.req_cons = rc;
+                       barrier();
                        DPRINTK("error: unknown block io operation [%d]\n",
                                req.operation);
                        make_response(blkif, req.id, req.operation,
                                      BLKIF_RSP_ERROR);
-                       free_req(pending_req);
                        break;
                }
 
@@ -384,7 +379,7 @@ static int _do_block_io_op(blkif_t *blki
                cond_resched();
        }
 
-       return more_to_do;
+       return 0;
 }
 
 static int
@@ -421,12 +416,15 @@ static void dispatch_rw_block_io(blkif_t
 
        switch (req->operation) {
        case BLKIF_OP_READ:
+               blkif->st_rd_req++;
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
+               blkif->st_wr_req++;
                operation = WRITE;
                break;
        case BLKIF_OP_WRITE_BARRIER:
+               blkif->st_br_req++;
                operation = WRITE_BARRIER;
                break;
        default:
@@ -559,7 +557,7 @@ static void dispatch_rw_block_io(blkif_t
 
        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
-       else if (operation == WRITE || operation == WRITE_BARRIER)
+       else
                blkif->st_wr_sect += preq.nr_sects;
 
        return;
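
The control-flow side of the change is easiest to see in isolation:
alloc_req() is now deferred until the operation is known to consume a
pending_req_t, and the read/write/barrier cases share one allocation
and dispatch path, so the error path never has anything to free. A
self-contained sketch of that pattern (resource_t, alloc_res(),
handle() and process_queue() are hypothetical stand-ins, not the
driver's API):

    #include <stdlib.h>

    typedef struct { int payload; } resource_t;

    /* Stand-in for alloc_req(): may return NULL when the (global)
     * pool is exhausted. */
    static resource_t *alloc_res(void)
    {
        return malloc(sizeof(resource_t));
    }

    static void handle(resource_t *res, int op)
    {
        /* ... perform the operation, then release the resource. */
        free(res);
    }

    /* Returns 1 if the caller should retry later (resource
     * pressure), 0 once the queue is drained. */
    static int process_queue(const int *ops, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            resource_t *res;

            switch (ops[i]) {
            case 0: /* folded: every op that needs a resource */
            case 1:
            case 2:
                res = alloc_res();
                if (!res)
                    return 1; /* come back later */
                handle(res, ops[i]);
                break;
            default:
                /* Unknown op: nothing was allocated, so there is
                 * nothing to free. */
                break;
            }
        }
        return 0;
    }

    int main(void)
    {
        static const int ops[] = { 0, 2, 9, 1 };
        return process_queue(ops, 4);
    }

In the driver the same shape also lets the per-operation statistics
move into dispatch_rw_block_io(), collapsing the caller's switch to a
single arm for all three request types.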

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog