[Minios-devel] [UNIKRAFT PATCH v3 12/14] plat/drivers: Flush requests for virtio block
This patch introduces support for flush requests.
A flush operation guarantees that all previously submitted write requests have completed.
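
For illustration only (not part of the patch): a minimal, self-contained sketch of the
descriptor layout a flush request ends up with, following the virtio-blk spec and the
segment counts used below. The blk_outhdr struct here is a stand-in for the real request
header defined in virtio_blk.h.

#include <stdint.h>

/* Mirrors the device-readable request header; redefined locally only to
 * keep this sketch self-contained.
 */
struct blk_outhdr {
	uint32_t type;		/* VIRTIO_BLK_T_FLUSH == 4 */
	uint32_t ioprio;
	uint64_t sector;	/* must be 0 for a flush */
};

int main(void)
{
	struct blk_outhdr hdr = { .type = 4, .ioprio = 0, .sector = 0 };
	uint8_t status = 0xff;	/* written by the device on completion */

	/* A flush chains exactly two descriptors:
	 *   sg[0]: &hdr,    sizeof(hdr)    -> device-readable, read_segs  = 1
	 *   sg[1]: &status, sizeof(status) -> device-writable, write_segs = 1
	 * No data segment is appended, which is why the patch calls
	 * virtio_blkdev_request_set_sglist() with have_data == false.
	 */
	(void)hdr;
	(void)status;
	return 0;
}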
Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
---
plat/drivers/include/virtio/virtio_blk.h | 11 ++++
plat/drivers/virtio/virtio_blk.c | 76 +++++++++++++++++++-----
2 files changed, 73 insertions(+), 14 deletions(-)
diff --git a/plat/drivers/include/virtio/virtio_blk.h b/plat/drivers/include/virtio/virtio_blk.h
index 8b50d889..662418c6 100644
--- a/plat/drivers/include/virtio/virtio_blk.h
+++ b/plat/drivers/include/virtio/virtio_blk.h
@@ -45,6 +45,10 @@
#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */
+/* Legacy feature bits */
+#define VIRTIO_BLK_F_FLUSH 9 /* Flush command supported */
+#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
+
struct virtio_blk_config {
/* The capacity (in 512-byte sectors). */
__u64 capacity;
@@ -56,6 +60,10 @@ struct virtio_blk_config {
/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
__u32 blk_size;
+
+ /* writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
+ __u8 wce;
+
/* number of vqs, only available when VIRTIO_BLK_F_MQ is set */
__u16 num_queues;
} __packed;
@@ -68,6 +76,9 @@ struct virtio_blk_config {
#define VIRTIO_BLK_T_IN 0
#define VIRTIO_BLK_T_OUT 1
+
+/* Cache flush command */
+#define VIRTIO_BLK_T_FLUSH 4
/*
* This comes first in the read scatter-gather list.
* For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
diff --git a/plat/drivers/virtio/virtio_blk.c b/plat/drivers/virtio/virtio_blk.c
index 6bdefd9c..520cdfbe 100644
--- a/plat/drivers/virtio/virtio_blk.c
+++ b/plat/drivers/virtio/virtio_blk.c
@@ -25,6 +25,7 @@
#include <uk/print.h>
#include <errno.h>
#include <fcntl.h>
+#include <stdbool.h>
#include <virtio/virtio_bus.h>
#include <virtio/virtio_ids.h>
#include <uk/blkdev.h>
@@ -49,11 +50,13 @@
* Multi-queue,
* Maximum size of a segment for requests,
* Maximum number of segments per request,
+ * Flush
**/
#define VIRTIO_BLK_DRV_FEATURES(features) \
(VIRTIO_FEATURES_UPDATE(features, VIRTIO_BLK_F_RO | \
VIRTIO_BLK_F_BLK_SIZE | VIRTIO_BLK_F_MQ | \
- VIRTIO_BLK_F_SEG_MAX | VIRTIO_BLK_F_SIZE_MAX))
+ VIRTIO_BLK_F_SEG_MAX | VIRTIO_BLK_F_SIZE_MAX | \
+ VIRTIO_BLK_F_CONFIG_WCE | VIRTIO_BLK_F_FLUSH))
static struct uk_alloc *a;
static const char *drv_name = DRIVER_NAME;
@@ -77,6 +80,8 @@ struct virtio_blk_device {
__u32 max_segments;
/* Maximum size of a segment */
__u32 max_size_segment;
+ /* If it is set then flush request is allowed */
+ __u8 writeback;
};
struct uk_blkdev_queue {
@@ -108,7 +113,8 @@ struct virtio_blkdev_request {
static int virtio_blkdev_request_set_sglist(struct uk_blkdev_queue *queue,
struct virtio_blkdev_request *virtio_blk_req,
- __sector sector_size)
+ __sector sector_size,
+ bool have_data)
{
struct virtio_blk_device *vbdev;
struct uk_blkreq *req;
@@ -137,19 +143,23 @@ static int virtio_blkdev_request_set_sglist(struct uk_blkdev_queue *queue,
goto out;
}
- for (idx = 0; idx < data_size; idx += segment_max_size) {
- segment_size = data_size - idx;
- segment_size = (segment_size > segment_max_size) ?
+ /* Append the data to the sglist in chunks of at most `segment_max_size`
+ * bytes. Only for read / write operations.
+ **/
+ if (have_data)
+ for (idx = 0; idx < data_size; idx += segment_max_size) {
+ segment_size = data_size - idx;
+ segment_size = (segment_size > segment_max_size) ?
segment_max_size : segment_size;
- rc = uk_sglist_append(&queue->sg,
- (void *)(start_data + idx),
- segment_size);
- if (unlikely(rc != 0)) {
- uk_pr_err("Failed to append to sg list %d\n",
- rc);
- goto out;
+ rc = uk_sglist_append(&queue->sg,
+ (void *)(start_data + idx),
+ segment_size);
+ if (unlikely(rc != 0)) {
+ uk_pr_err("Failed to append to sg list %d\n",
+ rc);
+ goto out;
+ }
}
- }
rc = uk_sglist_append(&queue->sg, &virtio_blk_req->status,
sizeof(uint8_t));
@@ -194,7 +204,7 @@ static int virtio_blkdev_request_write(struct uk_blkdev_queue *queue,
return -EINVAL;
rc = virtio_blkdev_request_set_sglist(queue, virtio_blk_req,
- cap->ssize);
+ cap->ssize, true);
if (rc) {
uk_pr_err("Failed to set sglist %d\n", rc);
goto out;
@@ -214,6 +224,39 @@ out:
return rc;
}
+static int virtio_blkdev_request_flush(struct uk_blkdev_queue *queue,
+ struct virtio_blkdev_request *virtio_blk_req,
+ __u16 *read_segs, __u16 *write_segs)
+{
+ struct virtio_blk_device *vbdev;
+ int rc = 0;
+
+ UK_ASSERT(queue);
+ UK_ASSERT(virtio_blk_req);
+
+ vbdev = queue->vbd;
+ if (!vbdev->writeback)
+ return -ENOTSUP;
+
+ if (virtio_blk_req->virtio_blk_outhdr.sector) {
+ uk_pr_warn("Start sector should be 0 for flush request\n");
+ virtio_blk_req->virtio_blk_outhdr.sector = 0;
+ }
+
+ rc = virtio_blkdev_request_set_sglist(queue, virtio_blk_req, 0, false);
+ if (rc) {
+ uk_pr_err("Failed to set sglist %d\n", rc);
+ goto out;
+ }
+
+ *read_segs = 1;
+ *write_segs = 1;
+ virtio_blk_req->virtio_blk_outhdr.type = VIRTIO_BLK_T_FLUSH;
+
+out:
+ return rc;
+}
+
static int virtio_blkdev_queue_enqueue(struct uk_blkdev_queue *queue,
struct uk_blkreq *req)
{
@@ -240,6 +283,9 @@ static int virtio_blkdev_queue_enqueue(struct uk_blkdev_queue *queue,
req->operation == UK_BLKDEV_READ)
rc = virtio_blkdev_request_write(queue, virtio_blk_req,
&read_segs, &write_segs);
+ else if (req->operation == UK_BLKDEV_FFLUSH)
+ rc = virtio_blkdev_request_flush(queue, virtio_blk_req,
+ &read_segs, &write_segs);
else
return -EINVAL;
@@ -719,6 +765,8 @@ static int virtio_blkdev_feature_negotiate(struct virtio_blk_device *vbdev)
vbdev->max_vqueue_pairs = num_queues;
vbdev->max_segments = max_segments;
vbdev->max_size_segment = max_size_segment;
+ vbdev->writeback = virtio_has_features(host_features,
+ VIRTIO_BLK_F_FLUSH);
/**
* Mask out features supported by both driver and device.
--
2.17.1