
[PATCH 06/27] drbd: cleanup decide_on_discard_support



Sanitize the calling conventions and use a goto label to clean up the
code flow.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 drivers/block/drbd/drbd_nl.c | 68 +++++++++++++++++++-----------------
 1 file changed, 35 insertions(+), 33 deletions(-)
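
For orientation, here is a minimal standalone sketch (hypothetical types and
names, not drbd code) of the "check, goto not_supported, configure, return"
shape that decide_on_discard_support() takes after this patch:

/*
 * Illustrative sketch only -- hypothetical types and helpers, not drbd
 * code: early checks jump to a not_supported label that clears the
 * limits, the success path configures them and returns.
 */
#include <stdbool.h>
#include <stddef.h>

struct sketch_limits {
	unsigned int discard_granularity;
	unsigned int max_discard_sectors;
};

struct sketch_backing_dev {
	bool supports_discard;
};

static void sketch_decide_on_discard(struct sketch_limits *lim,
				     const struct sketch_backing_dev *bdev)
{
	/* A NULL bdev means diskless; treat that as "supported". */
	if (bdev && !bdev->supports_discard)
		goto not_supported;

	lim->discard_granularity = 512;
	lim->max_discard_sectors = 1U << 15;	/* arbitrary example value */
	return;

not_supported:
	lim->discard_granularity = 0;
	lim->max_discard_sectors = 0;
}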

diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 02030c9c4d3b1..40bb0b356a6d6 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1204,38 +1204,42 @@ static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
 }
 
 static void decide_on_discard_support(struct drbd_device *device,
-                       struct request_queue *q,
-                       struct request_queue *b,
-                       bool discard_zeroes_if_aligned)
+               struct drbd_backing_dev *bdev)
 {
-       /* q = drbd device queue (device->rq_queue)
-        * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
-        *     or NULL if diskless
-        */
-       struct drbd_connection *connection = first_peer_device(device)->connection;
-       bool can_do = b ? blk_queue_discard(b) : true;
-
-       if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
-               can_do = false;
-               drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
-       }
-       if (can_do) {
-               /* We don't care for the granularity, really.
-                * Stacking limits below should fix it for the local
-                * device.  Whether or not it is a suitable granularity
-                * on the remote device is not our problem, really. If
-                * you care, you need to use devices with similar
-                * topology on all peers. */
-               blk_queue_discard_granularity(q, 512);
-               q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
-               q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
-       } else {
-               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
-               blk_queue_discard_granularity(q, 0);
-               q->limits.max_discard_sectors = 0;
-               q->limits.max_write_zeroes_sectors = 0;
+       struct drbd_connection *connection =
+               first_peer_device(device)->connection;
+       struct request_queue *q = device->rq_queue;
+
+       if (bdev && !blk_queue_discard(bdev->backing_bdev->bd_disk->queue))
+               goto not_supported;
+
+       if (connection->cstate >= C_CONNECTED &&
+           !(connection->agreed_features & DRBD_FF_TRIM)) {
+               drbd_info(connection,
+                       "peer DRBD too old, does not support TRIM: disabling 
discards\n");
+               goto not_supported;
        }
+
+       /*
+        * We don't care for the granularity, really.
+        *
+        * Stacking limits below should fix it for the local device.  Whether or
+        * not it is a suitable granularity on the remote device is not our
+        * problem, really. If you care, you need to use devices with similar
+        * topology on all peers.
+        */
+       blk_queue_discard_granularity(q, 512);
+       q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+       q->limits.max_write_zeroes_sectors =
+               drbd_max_discard_sectors(connection);
+       return;
+
+not_supported:
+       blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
+       blk_queue_discard_granularity(q, 0);
+       q->limits.max_discard_sectors = 0;
+       q->limits.max_write_zeroes_sectors = 0;
 }
 
 static void fixup_discard_if_not_supported(struct request_queue *q)
@@ -1273,7 +1277,6 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
        unsigned int max_segments = 0;
        struct request_queue *b = NULL;
        struct disk_conf *dc;
-       bool discard_zeroes_if_aligned = true;
 
        if (bdev) {
                b = bdev->backing_bdev->bd_disk->queue;
@@ -1282,7 +1285,6 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
                rcu_read_lock();
                dc = rcu_dereference(device->ldev->disk_conf);
                max_segments = dc->max_bio_bvecs;
-               discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
                rcu_read_unlock();
 
                blk_set_stacking_limits(&q->limits);
@@ -1292,7 +1294,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
        /* This is the workaround for "bio would need to, but cannot, be split" */
        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
        blk_queue_segment_boundary(q, PAGE_SIZE-1);
-       decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
+       decide_on_discard_support(device, bdev);
 
        if (b) {
                blk_stack_limits(&q->limits, &b->limits, 0);
-- 
2.30.2