[Xen-devel] [RFC v1 4/5] VBD: enlarge max segment per request in blkfront
Make the pending_req list per disk: move the pending_reqs pool, its free list, spinlock and wait queue, and the preallocated pages and grant handles out of the module-global struct xen_blkbk and into each xen_blkif, allocating them in the new xen_blkif_init_blkbk() when the frontend connects its ring.
Signed-off-by: Ronghui Duan <ronghui.duan@xxxxxxxxx>
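
The heart of the change: the pending_req pool (free list, its lock and wait queue, the preallocated pages and grant handles) stops being one static module-wide object and becomes a member of each xen_blkif, so every disk owns an independently sized pool. A minimal userspace sketch of that shape, with purely illustrative demo_* names (not the kernel structures):

	#include <stdio.h>
	#include <stdlib.h>

	/* Simplified stand-ins for struct xen_blkbk / struct xen_blkif. */
	struct demo_pool {
		int nreqs;			/* like blkbk->pending_reqs[] */
	};

	struct demo_disk {
		struct demo_pool *pool;		/* like blkif->blkbk */
	};

	int main(void)
	{
		/* Two disks, two independent pools: exhausting one no
		 * longer starves the other, unlike the old single
		 * static blkbk. */
		struct demo_disk a = { malloc(sizeof(struct demo_pool)) };
		struct demo_disk b = { malloc(sizeof(struct demo_pool)) };

		if (!a.pool || !b.pool)
			return 1;
		a.pool->nreqs = 64;
		b.pool->nreqs = 64;
		printf("disk a: %d reqs, disk b: %d reqs\n",
		       a.pool->nreqs, b.pool->nreqs);
		free(a.pool);
		free(b.pool);
		return 0;
	}
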
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index b4767f5..45eda98 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -64,49 +64,6 @@ MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
static unsigned int log_stats;
module_param(log_stats, int, 0644);
-struct seg_buf {
- unsigned long buf;
- unsigned int nsec;
-};
-
-/*
- * Each outstanding request that we've passed to the lower device layers has a
- * 'pending_req' allocated to it. Each buffer_head that completes decrements
- * the pendcnt towards zero. When it hits zero, the specified domain has a
- * response queued for it, with the saved 'id' passed back.
- */
-struct pending_req {
- struct xen_blkif *blkif;
- u64 id;
- int nr_pages;
- atomic_t pendcnt;
- unsigned short operation;
- int status;
- struct list_head free_list;
- struct gnttab_map_grant_ref *map;
- struct gnttab_unmap_grant_ref *unmap;
- struct seg_buf *seg;
- struct bio **biolist;
- struct page **pages;
-};
-
-#define BLKBACK_INVALID_HANDLE (~0)
-
-struct xen_blkbk {
- struct pending_req *pending_reqs;
- /* List of all 'pending_req' available */
- struct list_head pending_free;
- /* And its spinlock. */
- spinlock_t pending_free_lock;
- wait_queue_head_t pending_free_wq;
- /* The list of all pages that are available. */
- struct page **pending_pages;
- /* And the grant handles that are available. */
- grant_handle_t *pending_grant_handles;
-};
-
-static struct xen_blkbk *blkbk;
-
/*
* Little helpful macro to figure out the index and virtual address of the
pending_pages[..]. For each 'pending_req' we have up to
@@ -115,20 +72,20 @@ static struct xen_blkbk *blkbk;
*/
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
- return (req - blkbk->pending_reqs) *
- BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
+ return (req - req->blkif->blkbk->pending_reqs) *
+ req->blkif->ops->max_seg + seg;
}
#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
static inline unsigned long vaddr(struct pending_req *req, int seg)
{
- unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
+	unsigned long pfn = page_to_pfn(req->blkif->blkbk->pending_page(req,
+					seg));
return (unsigned long)pfn_to_kaddr(pfn);
}
#define pending_handle(_req, _seg) \
- (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
+ (_req->blkif->blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
static int do_block_io_op(struct xen_blkif *blkif);
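
The helpers above keep the old flat page layout, just keyed by the per-ring limit instead of the compile-time constant: pages for pending request i occupy slots [i * max_seg, (i + 1) * max_seg). A standalone check of that arithmetic (pagenr is a hypothetical stand-in for vaddr_pagenr):

	#include <assert.h>

	static int pagenr(int req_idx, int max_seg, int seg)
	{
		return req_idx * max_seg + seg;	/* mirrors vaddr_pagenr() */
	}

	int main(void)
	{
		/* With the classic limit of 11 segments per request,
		 * segment 3 of request 2 lands in slot 2 * 11 + 3 = 25. */
		assert(pagenr(2, 11, 3) == 25);
		return 0;
	}
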
@@ -143,6 +100,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
*/
static void free_req(struct pending_req *req)
{
+ struct xen_blkbk *blkbk = req->blkif->blkbk;
unsigned long flags;
int was_empty;
@@ -162,8 +120,9 @@ static void free_req(struct pending_req *req)
/*
* Retrieve from the 'pending_reqs' a free pending_req structure to be used.
*/
-static struct pending_req *alloc_req(void)
+static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
+ struct xen_blkbk *blkbk = blkif->blkbk;
struct pending_req *req = NULL;
unsigned long flags;
unsigned int max_seg = BLKIF_MAX_SEGMENTS_PER_REQUEST;
@@ -173,6 +132,7 @@ static struct pending_req *alloc_req(void)
req = list_entry(blkbk->pending_free.next, struct pending_req,
free_list);
list_del(&req->free_list);
+ req->blkif = blkif;
}
spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
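
Passing the blkif into alloc_req() is what ties the rest together: the request is drawn from that disk's own free list and stamped with req->blkif, which free_req() and the vaddr helpers use to find their way back to the right pool. A compact sketch of the back-pointer idea (demo_* names are illustrative, locking elided):

	struct demo_disk;		/* stands in for struct xen_blkif */

	struct demo_req {
		struct demo_req *next;
		struct demo_disk *disk;	/* like req->blkif */
	};

	struct demo_disk {
		struct demo_req *free_list;
	};

	static struct demo_req *demo_alloc(struct demo_disk *d)
	{
		struct demo_req *r = d->free_list;

		if (r) {
			d->free_list = r->next;
			r->disk = d;	/* mirrors req->blkif = blkif */
		}
		return r;
	}

	static void demo_free(struct demo_req *r)
	{
		/* No pool argument needed: the request knows its owner. */
		struct demo_disk *d = r->disk;

		r->next = d->free_list;
		d->free_list = r;
	}

	int main(void)
	{
		struct demo_req r = { 0 };
		struct demo_disk d = { &r };
		struct demo_req *got = demo_alloc(&d);

		demo_free(got);
		return got != &r;	/* 0 on success */
	}
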
@@ -319,8 +279,8 @@ int xen_blkif_schedule(void *arg)
blkif->wq,
blkif->waiting_reqs || kthread_should_stop());
wait_event_interruptible(
- blkbk->pending_free_wq,
- !list_empty(&blkbk->pending_free) ||
+ blkif->blkbk->pending_free_wq,
+ !list_empty(&blkif->blkbk->pending_free) ||
kthread_should_stop());
blkif->waiting_reqs = 0;
@@ -395,7 +355,8 @@ static int xen_blkbk_map(struct blkif_request *req,
pending_req->blkif->domid);
}
-	ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0),
-			      nseg);
+ ret = gnttab_map_refs(map, NULL,
+ &pending_req->blkif->blkbk->pending_page(pending_req, 0), nseg);
BUG_ON(ret);
/*
@@ -580,7 +541,7 @@ __do_block_io_op(struct xen_blkif *blkif)
break;
}
- pending_req = alloc_req();
+ pending_req = alloc_req(blkif);
if (NULL == pending_req) {
blkif->st_oo_req++;
more_to_do = 1;
@@ -742,7 +703,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
(bio_add_page(bio,
- blkbk->pending_page(pending_req, i),
+ blkif->blkbk->pending_page(pending_req, i),
seg[i].nsec << 9,
seg[i].buf & ~PAGE_MASK) == 0)) {
@@ -867,35 +828,34 @@ static void make_response(struct xen_blkif *blkif, u64 id,
notify_remote_via_irq(blkif->irq);
}
-static int __init xen_blkif_init(void)
+int xen_blkif_init_blkbk(struct xen_blkif *blkif)
{
- int i, mmap_pages;
int rc = 0;
+ int i, mmap_pages;
+ struct xen_blkbk *blkbk;
- if (!xen_domain())
- return -ENODEV;
-
- blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
- if (!blkbk) {
+ blkif->blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
+ if (!blkif->blkbk) {
pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
return -ENOMEM;
}
- mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
-
- blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) *
- xen_blkif_reqs, GFP_KERNEL);
-	blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
-					mmap_pages, GFP_KERNEL);
- blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
- mmap_pages, GFP_KERNEL);
-
- if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
- !blkbk->pending_pages) {
- rc = -ENOMEM;
+ blkbk = blkif->blkbk;
+ mmap_pages = xen_blkif_reqs * blkif->ops->max_seg;
+
+ blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) *
+ xen_blkif_reqs, GFP_KERNEL);
+	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0])
+					* mmap_pages, GFP_KERNEL);
+ blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
+ mmap_pages, GFP_KERNEL);
+
+ if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
+ !blkbk->pending_pages) {
+ rc = -ENOMEM;
goto out_of_memory;
}
-
+
for (i = 0; i < mmap_pages; i++) {
blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
@@ -904,10 +864,6 @@ static int __init xen_blkif_init(void)
goto out_of_memory;
}
}
- rc = xen_blkif_interface_init();
- if (rc)
- goto failed_init;
-
INIT_LIST_HEAD(&blkbk->pending_free);
spin_lock_init(&blkbk->pending_free_lock);
init_waitqueue_head(&blkbk->pending_free_wq);
@@ -916,15 +872,10 @@ static int __init xen_blkif_init(void)
list_add_tail(&blkbk->pending_reqs[i].free_list,
&blkbk->pending_free);
- rc = xen_blkif_xenbus_init();
- if (rc)
- goto failed_init;
-
return 0;
- out_of_memory:
+out_of_memory:
pr_alert(DRV_PFX "%s: out of memory\n", __func__);
- failed_init:
kfree(blkbk->pending_reqs);
kfree(blkbk->pending_grant_handles);
if (blkbk->pending_pages) {
@@ -935,7 +886,7 @@ static int __init xen_blkif_init(void)
kfree(blkbk->pending_pages);
}
kfree(blkbk);
- blkbk = NULL;
+ blkif->blkbk = NULL;
return rc;
}
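
For scale (assuming the module default xen_blkif_reqs = 64, which this hunk does not show): a disk whose ring keeps the classic 11-segment limit now preallocates 64 * 11 = 704 pages plus 704 grant handles, and a ring that negotiates a larger max_seg grows its pool proportionally. A trivial sketch of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int reqs = 64;		/* assumed xen_blkif_reqs default */
		unsigned int max_seg = 11;	/* BLKIF_MAX_SEGMENTS_PER_REQUEST */

		/* mmap_pages = xen_blkif_reqs * blkif->ops->max_seg */
		printf("mmap_pages = %u\n", reqs * max_seg);	/* prints 704 */
		return 0;
	}
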
@@ -947,6 +898,24 @@ struct blkback_ring_operation blkback_ring_ops = {
.max_seg = BLKIF_MAX_SEGMENTS_PER_REQUEST,
};
+static int __init xen_blkif_init(void)
+{
+ int rc = 0;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ rc = xen_blkif_interface_init();
+ if (rc)
+ return rc;
+
+ rc = xen_blkif_xenbus_init();
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
module_init(xen_blkif_init);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/block/xen-blkback/common.h
b/drivers/block/xen-blkback/common.h
index ce5556a..80e8acc 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -169,6 +169,7 @@ struct xen_vbd {
struct backend_info;
struct xen_blkif;
+struct xen_blkbk;
struct blkback_ring_operation {
void *(*get_back_ring) (struct xen_blkif *blkif);
@@ -190,6 +191,7 @@ struct xen_blkif {
enum blkif_backring_type blk_backring_type;
union blkif_back_rings blk_rings;
void *blk_ring;
+ struct xen_blkbk *blkbk;
/* The VBD attached to this interface. */
struct xen_vbd vbd;
/* Back pointer to the backend_info. */
@@ -221,6 +223,46 @@ struct xen_blkif {
wait_queue_head_t waiting_to_free;
};
+struct seg_buf {
+ unsigned long buf;
+ unsigned int nsec;
+};
+
+/*
+ * Each outstanding request that we've passed to the lower device layers has a
+ * 'pending_req' allocated to it. Each buffer_head that completes decrements
+ * the pendcnt towards zero. When it hits zero, the specified domain has a
+ * response queued for it, with the saved 'id' passed back.
+ */
+struct pending_req {
+ struct xen_blkif *blkif;
+ u64 id;
+ int nr_pages;
+ atomic_t pendcnt;
+ unsigned short operation;
+ int status;
+ struct list_head free_list;
+ struct gnttab_map_grant_ref *map;
+ struct gnttab_unmap_grant_ref *unmap;
+ struct seg_buf *seg;
+ struct bio **biolist;
+ struct page **pages;
+};
+
+#define BLKBACK_INVALID_HANDLE (~0)
+
+struct xen_blkbk {
+ struct pending_req *pending_reqs;
+ /* List of all 'pending_req' available */
+ struct list_head pending_free;
+ /* And its spinlock. */
+ spinlock_t pending_free_lock;
+ wait_queue_head_t pending_free_wq;
+ /* The list of all pages that are available. */
+ struct page **pending_pages;
+ /* And the grant handles that are available. */
+ grant_handle_t *pending_grant_handles;
+};
#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
(_v)->bdev->bd_part->nr_sects : \
@@ -243,6 +285,8 @@ int xen_blkif_interface_init(void);
int xen_blkif_xenbus_init(void);
+int xen_blkif_init_blkbk(struct xen_blkif *blkif);
+
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
diff --git a/drivers/block/xen-blkback/xenbus.c
b/drivers/block/xen-blkback/xenbus.c
index 850ecad..8b0d496 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -769,6 +769,12 @@ static int connect_ring(struct backend_info *be)
return err;
}
+ err = xen_blkif_init_blkbk(be->blkif);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "xen blkif init blkbk fails\n");
+ return err;
+ }
+
return 0;
}
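
Note the lifetime change: the per-disk pool now comes into existence only when the frontend connects its ring, i.e. once the negotiated segment count is known, instead of at module load. A simplified lifecycle sketch with hypothetical pool_create/pool_destroy names (the teardown side is not shown in this hunk):

	#include <stdlib.h>

	struct pool { int npages; };

	/* Allocate at connect time, once the ring's max_seg is known. */
	static struct pool *pool_create(int reqs, int max_seg)
	{
		struct pool *p = malloc(sizeof(*p));

		if (p)
			p->npages = reqs * max_seg;
		return p;	/* caller must treat NULL as a fatal connect error */
	}

	static void pool_destroy(struct pool *p)
	{
		free(p);	/* mirrors the kfree()/blkif->blkbk = NULL cleanup */
	}

	int main(void)
	{
		struct pool *p = pool_create(64, 11);

		if (!p)
			return 1;	/* like xenbus_dev_fatal() failing the connect */
		pool_destroy(p);
		return 0;
	}
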
-ronghui
Attachment: vbd_enlarge_segments_04.patch