[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 07 of 12] blktap2: use blk_rq_map_sg() here too
# HG changeset patch # User Keir Fraser <keir.fraser@xxxxxxxxxx> # Date 1244108814 -3600 # Node ID c5271142c79724bc366b91885737a4c0343c5a19 # Parent d39f82ea033d05bd9cfa4a2fbcfca55f3880498b blktap2: use blk_rq_map_sg() here too Just like in blkfront, not doing so can cause the maximum number of segments check to trigger. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx> linux-2.6-pvops: * Rebase against git-fbbc8527 (wean off of use of the BlkBack pageflag) * Fixed scatterlist access. Signed-off-by: Daniel Stodden <daniel.stodden@xxxxxxxxxx> diff -r d39f82ea033d -r c5271142c797 drivers/xen/blktap/blktap.h --- a/drivers/xen/blktap/blktap.h Thu Jun 04 10:33:52 2009 +0100 +++ b/drivers/xen/blktap/blktap.h Thu Jun 04 10:46:54 2009 +0100 @@ -4,6 +4,7 @@ #include <linux/mm.h> #include <linux/fs.h> #include <linux/cdev.h> +#include <linux/scatterlist.h> #include <xen/blkif.h> #include <xen/grant_table.h> @@ -173,6 +174,7 @@ int pending_cnt; struct blktap_request *pending_requests[MAX_PENDING_REQS]; + struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; wait_queue_head_t wq; struct list_head deferred_queue; diff -r d39f82ea033d -r c5271142c797 drivers/xen/blktap/device.c --- a/drivers/xen/blktap/device.c Thu Jun 04 10:33:52 2009 +0100 +++ b/drivers/xen/blktap/device.c Thu Jun 04 10:46:54 2009 +0100 @@ -579,10 +579,9 @@ struct request *req) { struct page *page; - struct bio_vec *bvec; - int usr_idx, err; - struct req_iterator iter; + int i, usr_idx, err; struct blktap_ring *ring; + struct scatterlist *sg; struct blktap_grant_table table; unsigned int fsect, lsect, nr_sects; unsigned long offset, uvaddr, kvaddr; @@ -609,42 +608,39 @@ nr_sects = 0; request->nr_pages = 0; - blkif_req.nr_segments = 0; - rq_for_each_segment(bvec, req, iter) { - BUG_ON(blkif_req.nr_segments == - BLKIF_MAX_SEGMENTS_PER_REQUEST); + blkif_req.nr_segments = blk_rq_map_sg(req->q, req, tap->sg); + BUG_ON(blkif_req.nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); + for (i = 0; i < blkif_req.nr_segments; 
++i) { + sg = tap->sg + i; + fsect = sg->offset >> 9; + lsect = fsect + (sg->length >> 9) - 1; + nr_sects += sg->length >> 9; - fsect = bvec->bv_offset >> 9; - lsect = fsect + (bvec->bv_len >> 9) - 1; - nr_sects += bvec->bv_len >> 9; - - blkif_req.seg[blkif_req.nr_segments] = + blkif_req.seg[i] = (struct blkif_request_segment) { .gref = 0, .first_sect = fsect, .last_sect = lsect }; - if (blkback_pagemap_contains_page(bvec->bv_page)) { + if (blkback_pagemap_contains_page(sg_page(sg))) { /* foreign page -- use xen */ if (blktap_prep_foreign(tap, request, &blkif_req, - blkif_req.nr_segments, - bvec->bv_page, + i, + sg_page(sg), &table)) goto out; } else { /* do it the old fashioned way */ blktap_map(tap, request, - blkif_req.nr_segments, - bvec->bv_page); + i, + sg_page(sg)); } - uvaddr = MMAP_VADDR(ring->user_vstart, - usr_idx, blkif_req.nr_segments); - kvaddr = request_to_kaddr(request, - blkif_req.nr_segments); + uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i); + kvaddr = request_to_kaddr(request, i); offset = (uvaddr - ring->vma->vm_start) >> PAGE_SHIFT; page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT); ring->foreign_map.map[offset] = page; @@ -654,10 +650,9 @@ uvaddr, page, __pa(kvaddr) >> PAGE_SHIFT); BTDBG("offset: 0x%08lx, pending_req: %p, seg: %d, " "page: %p, kvaddr: 0x%08lx, uvaddr: 0x%08lx\n", - offset, request, blkif_req.nr_segments, + offset, request, i, page, kvaddr, uvaddr); - blkif_req.nr_segments++; request->nr_pages++; } _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.