[Xen-devel] [PATCH RFC 7/8] xen-blkback: frontend feature control
The toolstack may write values to the "require" subdirectory in the
backend's main directory (e.g. backend/vbd/X/Y/). Read these values and
use them when announcing features to the frontend. When the backend
scans the frontend's features, the values set in the require directory
take precedence, so feature parsing itself does not significantly
change.

xenbus_read_feature() reads a feature value from the require
subdirectory, using default_val when no entry is present. We then
replace all instances of xenbus_printf to use these previously seeded
features. A backend_features struct is introduced, and the values set
there are used in place of the module parameters.

Note, however, that feature-barrier, feature-flush-support and
feature-discard aren't probed, because the first two depend on the
physical device and feature-discard already has tunables to adjust it.

Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
---
 drivers/block/xen-blkback/blkback.c |  2 +-
 drivers/block/xen-blkback/common.h  |  1 +
 drivers/block/xen-blkback/xenbus.c  | 66 ++++++++++++++++++++++++++++++++-----
 3 files changed, 60 insertions(+), 9 deletions(-)

diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c90e933330b6..05b3f124c871 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1271,7 +1271,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	     unlikely((req->operation != BLKIF_OP_INDIRECT) &&
 		      (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
 	     unlikely((req->operation == BLKIF_OP_INDIRECT) &&
-		      (nseg > MAX_INDIRECT_SEGMENTS))) {
+		      (nseg > ring->blkif->vbd.max_indirect_segs))) {
 		pr_debug("Bad number of segments in request (%d)\n", nseg);
 		/* Haven't submitted any bio's yet. */
 		goto fail_response;
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index a7832428e0da..ff12f2d883b9 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -229,6 +229,7 @@ struct xen_vbd {
 	unsigned int		discard_secure:1;
 	unsigned int		feature_gnt_persistent:1;
 	unsigned int		overflow_max_grants:1;
+	unsigned int		max_indirect_segs;
 };

 struct backend_info;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 48d796ea3626..31683f29d5fb 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -25,11 +25,19 @@
 /* On the XenBus the max length of 'ring-ref%u'. */
 #define RINGREF_NAME_LEN (20)
+#define REQUIRE_PATH_LEN (256)
+
+struct backend_features {
+	unsigned max_queues;
+	unsigned max_ring_order;
+	unsigned pers_grants;
+};

 struct backend_info {
 	struct xenbus_device	*dev;
 	struct xen_blkif	*blkif;
 	struct xenbus_watch	backend_watch;
+	struct backend_features	features;
 	unsigned		major;
 	unsigned		minor;
 	char			*mode;
@@ -602,6 +610,40 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
 	return err;
 }

+static int xenbus_read_feature(const char *dir, const char *node,
+			       unsigned int default_val)
+{
+	char reqnode[REQUIRE_PATH_LEN];
+	unsigned int val;
+
+	snprintf(reqnode, REQUIRE_PATH_LEN, "%s/require", dir);
+	val = xenbus_read_unsigned(reqnode, node, default_val);
+	return val;
+}
+
+static void xen_blkbk_probe_features(struct xenbus_device *dev,
+				     struct backend_info *be)
+{
+	struct backend_features *ft = &be->features;
+	struct xen_vbd *vbd = &be->blkif->vbd;
+
+	vbd->max_indirect_segs = xenbus_read_feature(dev->nodename,
+					"feature-max-indirect-segments",
+					MAX_INDIRECT_SEGMENTS);
+
+	ft->max_queues = xenbus_read_feature(dev->nodename,
+					"multi-queue-max-queues",
+					xenblk_max_queues);
+
+	ft->max_ring_order = xenbus_read_feature(dev->nodename,
+					"max-ring-page-order",
+					xen_blkif_max_ring_order);
+
+	ft->pers_grants = xenbus_read_feature(dev->nodename,
+					"feature-persistent",
+					1);
+}
+
 /*
  * Entry point to this code when a new device is created. Allocate the basic
  * structures, and watch the store waiting for the hotplug scripts to tell us
@@ -613,6 +655,7 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
 	int err;
 	struct backend_info *be = kzalloc(sizeof(struct backend_info),
 					  GFP_KERNEL);
+	struct backend_features *ft;

 	/* match the pr_debug in xen_blkbk_remove */
 	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
@@ -633,9 +676,12 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
 		goto fail;
 	}

+	xen_blkbk_probe_features(dev, be);
+	ft = &be->features;
+
 	err = xenbus_printf(XBT_NIL, dev->nodename,
 			    "feature-max-indirect-segments", "%u",
-			    MAX_INDIRECT_SEGMENTS);
+			    be->blkif->vbd.max_indirect_segs);
 	if (err)
 		dev_warn(&dev->dev,
 			 "writing %s/feature-max-indirect-segments (%d)",
@@ -643,7 +689,7 @@ static int xen_blkbk_probe(struct xenbus_device *dev,

 	/* Multi-queue: advertise how many queues are supported by us.*/
 	err = xenbus_printf(XBT_NIL, dev->nodename,
-			    "multi-queue-max-queues", "%u", xenblk_max_queues);
+			    "multi-queue-max-queues", "%u", ft->max_queues);
 	if (err)
 		pr_warn("Error writing multi-queue-max-queues\n");
@@ -656,7 +702,7 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
 		goto fail;

 	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
-			    xen_blkif_max_ring_order);
+			    ft->max_ring_order);
 	if (err)
 		pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);
@@ -849,6 +895,7 @@ static void frontend_changed(struct xenbus_device *dev,
  */
 static void connect(struct backend_info *be)
 {
+	struct backend_features *ft = &be->features;
 	struct xenbus_transaction xbt;
 	int err;
 	struct xenbus_device *dev = be->dev;
@@ -870,7 +917,8 @@ static void connect(struct backend_info *be)

 	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

-	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
+	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
+			    ft->pers_grants);
 	if (err) {
 		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
 				 dev->nodename);
@@ -933,6 +981,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 	struct pending_req *req, *n;
 	int err, i, j;
 	struct xen_blkif *blkif = ring->blkif;
+	struct backend_features *ft = &blkif->be->features;
 	struct xenbus_device *dev = blkif->be->dev;
 	unsigned int ring_page_order, nr_grefs, evtchn;
@@ -957,7 +1006,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 	} else {
 		unsigned int i;

-		if (ring_page_order > xen_blkif_max_ring_order) {
+		if (ring_page_order > ft->max_ring_order) {
 			err = -EINVAL;
 			xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
 					 dir, ring_page_order,
@@ -1030,6 +1079,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)

 static int connect_ring(struct backend_info *be)
 {
+	struct backend_features *ft = &be->features;
 	struct xenbus_device *dev = be->dev;
 	unsigned int pers_grants;
 	char protocol[64] = "";
@@ -1058,7 +1108,7 @@ static int connect_ring(struct backend_info *be)
 	}
 	pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
 					   0);
-	be->blkif->vbd.feature_gnt_persistent = pers_grants;
+	be->blkif->vbd.feature_gnt_persistent = (pers_grants && ft->pers_grants);
 	be->blkif->vbd.overflow_max_grants = 0;

 	/*
@@ -1067,12 +1117,12 @@ static int connect_ring(struct backend_info *be)
 	requested_num_queues = xenbus_read_unsigned(dev->otherend,
 						    "multi-queue-num-queues",
 						    1);
-	if (requested_num_queues > xenblk_max_queues
+	if (requested_num_queues > ft->max_queues
 	    || requested_num_queues == 0) {
 		/* Buggy or malicious guest. */
 		xenbus_dev_fatal(dev, err,
 				 "guest requested %u queues, exceeding the maximum of %u.",
-				 requested_num_queues, xenblk_max_queues);
+				 requested_num_queues, ft->max_queues);
 		return -ENOSYS;
 	}
 	be->blkif->nr_rings = requested_num_queues;
-- 
2.11.0
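
For illustration, here is a minimal dom0 sketch (not part of this
patch) of how a toolstack might seed the require subdirectory before
the frontend connects, using the libxenstore API. The backend path
(frontend domid 1, device id 51712, i.e. xvda) and the values written
are hypothetical; the nodes must be in place before the backend device
is probed, since xen_blkbk_probe_features() reads them at probe time.

/*
 * Hypothetical toolstack-side helper: seed xen-blkback's "require"
 * subdirectory for one vbd so the backend announces capped features.
 * Build with: gcc seed.c -o seed -lxenstore
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <xenstore.h>

static void seed(struct xs_handle *xsh, const char *base,
		 const char *node, const char *val)
{
	char path[256];

	/* e.g. .../require/max-ring-page-order = "0" */
	snprintf(path, sizeof(path), "%s/require/%s", base, node);
	if (!xs_write(xsh, XBT_NULL, path, val, strlen(val)))
		fprintf(stderr, "failed to write %s\n", path);
}

int main(void)
{
	/* Hypothetical backend area of one vbd (domid 1, xvda). */
	const char *base = "/local/domain/0/backend/vbd/1/51712";
	struct xs_handle *xsh = xs_open(0);

	if (!xsh)
		return EXIT_FAILURE;

	seed(xsh, base, "max-ring-page-order", "0");    /* single-page ring */
	seed(xsh, base, "feature-persistent", "0");     /* no persistent grants */
	seed(xsh, base, "multi-queue-max-queues", "1"); /* one queue */

	xs_close(xsh);
	return EXIT_SUCCESS;
}

With entries like these in place, xen_blkbk_probe_features() would pick
up the capped values and announce them to the frontend in place of the
module-parameter defaults, and connect_ring() would reject a frontend
requesting more queues than the seeded maximum.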