[PATCH 18/26] block: move the synchronous flag to queue_limits

Move the synchronous flag into the queue_limits feature field so that it
can be set atomically and all I/O is frozen when changing the flag.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 block/blk-mq-debugfs.c        | 1 -
 drivers/block/brd.c           | 2 +-
 drivers/block/zram/zram_drv.c | 4 ++--
 drivers/nvdimm/btt.c          | 3 +--
 drivers/nvdimm/pmem.c         | 4 ++--
 include/linux/blkdev.h        | 7 ++++---
 6 files changed, 10 insertions(+), 11 deletions(-)
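
(Not part of the patch itself, just for review context: a minimal sketch of
what the new convention looks like from a driver's point of view.  The "foo"
driver and its other limits values are made up for illustration; only
BLK_FEAT_SYNCHRONOUS, blk_alloc_disk() and bdev_synchronous() are existing
interfaces in the tree this series applies to.)

#include <linux/blkdev.h>

/*
 * Hypothetical driver: declare the synchronous property up front in the
 * queue_limits passed to blk_alloc_disk() instead of setting
 * QUEUE_FLAG_SYNCHRONOUS on the queue afterwards.
 */
static struct gendisk *foo_alloc_disk(void)
{
        struct queue_limits lim = {
                .logical_block_size     = PAGE_SIZE,
                .max_hw_sectors         = UINT_MAX,
                /* all bios complete in the submitting context */
                .features               = BLK_FEAT_SYNCHRONOUS,
        };
        struct gendisk *disk;

        disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
        if (IS_ERR(disk))
                return disk;

        /* consumers keep using the existing helper to query the property */
        WARN_ON_ONCE(!bdev_synchronous(disk->part0));
        return disk;
}

A later change to the flag would go through the queue_limits_start_update()/
queue_limits_commit_update() pair rather than flipping a queue_flags bit,
which is what the changelog above means by setting it atomically.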

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index eb73f1d348e5a9..957774e40b1d0c 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -85,7 +85,6 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
        QUEUE_FLAG_NAME(NOXMERGES),
-       QUEUE_FLAG_NAME(SYNCHRONOUS),
        QUEUE_FLAG_NAME(SAME_FORCE),
        QUEUE_FLAG_NAME(INIT_DONE),
        QUEUE_FLAG_NAME(POLL),
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index b25dc463b5e3a6..d77deb571dbd06 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -335,6 +335,7 @@ static int brd_alloc(int i)
                .max_hw_discard_sectors = UINT_MAX,
                .max_discard_segments   = 1,
                .discard_granularity    = PAGE_SIZE,
+               .features               = BLK_FEAT_SYNCHRONOUS,
        };
 
        list_for_each_entry(brd, &brd_devices, brd_list)
@@ -366,7 +367,6 @@ static int brd_alloc(int i)
        strscpy(disk->disk_name, buf, DISK_NAME_LEN);
        set_capacity(disk, rd_size * 2);
        
-       blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
        blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
        err = add_disk(disk);
        if (err)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f8f1b5b54795ac..efcb8d9d274c31 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2208,7 +2208,8 @@ static int zram_add(void)
 #if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
                .max_write_zeroes_sectors       = UINT_MAX,
 #endif
-               .features                       = BLK_FEAT_STABLE_WRITES,
+               .features                       = BLK_FEAT_STABLE_WRITES |
+                                                 BLK_FEAT_SYNCHRONOUS,
        };
        struct zram *zram;
        int ret, device_id;
@@ -2246,7 +2247,6 @@ static int zram_add(void)
 
        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */
        set_capacity(zram->disk, 0);
-       blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
        ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
        if (ret)
                goto out_cleanup_disk;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index e474afa8e9f68d..e79c06d65bb77b 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1501,6 +1501,7 @@ static int btt_blk_init(struct btt *btt)
                .logical_block_size     = btt->sector_size,
                .max_hw_sectors         = UINT_MAX,
                .max_integrity_segments = 1,
+               .features               = BLK_FEAT_SYNCHRONOUS,
        };
        int rc;
 
@@ -1518,8 +1519,6 @@ static int btt_blk_init(struct btt *btt)
        btt->btt_disk->fops = &btt_fops;
        btt->btt_disk->private_data = btt;
 
-       blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
-
        set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
        rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
        if (rc)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 501cf226df0187..b821dcf018f6ae 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -455,7 +455,8 @@ static int pmem_attach_disk(struct device *dev,
                .logical_block_size     = pmem_sector_size(ndns),
                .physical_block_size    = PAGE_SIZE,
                .max_hw_sectors         = UINT_MAX,
-               .features               = BLK_FEAT_WRITE_CACHE,
+               .features               = BLK_FEAT_WRITE_CACHE |
+                                         BLK_FEAT_SYNCHRONOUS,
        };
        int nid = dev_to_node(dev), fua;
        struct resource *res = &nsio->res;
@@ -546,7 +547,6 @@ static int pmem_attach_disk(struct device *dev,
        }
        pmem->virt_addr = addr;
 
-       blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
        if (pmem->pfn_flags & PFN_MAP)
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index db14c61791e022..4d908e29c760da 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -301,6 +301,9 @@ enum {
 
        /* don't modify data until writeback is done */
        BLK_FEAT_STABLE_WRITES                  = (1u << 5),
+
+       /* always completes in submit context */
+       BLK_FEAT_SYNCHRONOUS                    = (1u << 6),
 };
 
 /*
@@ -566,7 +569,6 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_COMP   4       /* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO     5       /* fake timeout */
 #define QUEUE_FLAG_NOXMERGES   9       /* No extended merges */
-#define QUEUE_FLAG_SYNCHRONOUS 11      /* always completes in submit context */
 #define QUEUE_FLAG_SAME_FORCE  12      /* force complete on same CPU */
 #define QUEUE_FLAG_INIT_DONE   14      /* queue is initialized */
 #define QUEUE_FLAG_POLL                16      /* IO polling enabled if set */
@@ -1315,8 +1317,7 @@ static inline bool bdev_nonrot(struct block_device *bdev)
 
 static inline bool bdev_synchronous(struct block_device *bdev)
 {
-       return test_bit(QUEUE_FLAG_SYNCHRONOUS,
-                       &bdev_get_queue(bdev)->queue_flags);
+       return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
 }
 
 static inline bool bdev_stable_writes(struct block_device *bdev)
-- 
2.43.0