
[Xen-devel] [PATCH qemu-xen] Fix after blkif.h update



Changeset 24875:a59c1dcfe968 made an incompatible change to the interface
headers, which needs to be reflected here. A compatibility definition is
included in order to keep building against older versions of the headers.
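To make the compatibility shim concrete: when the installed blkif.h
predates the rename, the new constant name is simply aliased to the old
one. A minimal sketch of the pattern (the actual definition is in the
xen_blkif.h hunk below):

    /* Older blkif.h only defines the old constant name; alias the new
     * name to it so the rest of the code can use the new name
     * unconditionally. */
    #ifndef BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK
    #define BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK BLKIF_MAX_SEGMENTS_PER_REQUEST
    #endif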

IMHO qemu should carry its own copy of the interface headers so that it can
update at its own pace (as most guest OSes do).

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 hw/xen_blkif.h |   13 +++++++++----
 hw/xen_disk.c  |   12 ++++++------
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/hw/xen_blkif.h b/hw/xen_blkif.h
index c0f4136..5db8319 100644
--- a/hw/xen_blkif.h
+++ b/hw/xen_blkif.h
@@ -5,6 +5,11 @@
 #include <xen/io/blkif.h>
 #include <xen/io/protocols.h>
 
+/* Compatibility with older blkif headers */
+#ifndef BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK
+#define BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK BLKIF_MAX_SEGMENTS_PER_REQUEST
+#endif
+
 /* Not a real protocol.  Used to generate ring structs which contain
  * the elements common to all protocols only.  This way we get a
  * compiler-checkable way to use common struct elements, so we can
@@ -24,7 +29,7 @@ struct blkif_x86_32_request {
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
 };
 struct blkif_x86_32_response {
        uint64_t        id;              /* copied from request */
@@ -42,7 +47,7 @@ struct blkif_x86_64_request {
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       __attribute__((__aligned__(8))) id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
 };
 struct blkif_x86_64_response {
        uint64_t       __attribute__((__aligned__(8))) id;
@@ -72,7 +77,7 @@ enum blkif_protocol {
 
 static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
 {
-       int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+       int i, n = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
 
        dst->operation = src->operation;
        dst->nr_segments = src->nr_segments;
@@ -87,7 +92,7 @@ static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_reque
 
 static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
 {
-       int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+       int i, n = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
 
        dst->operation = src->operation;
        dst->nr_segments = src->nr_segments;
diff --git a/hw/xen_disk.c b/hw/xen_disk.c
index 286bbac..0f22bc6 100644
--- a/hw/xen_disk.c
+++ b/hw/xen_disk.c
@@ -54,7 +54,7 @@ static int use_aio      = 1;
 /* ------------------------------------------------------------- */
 
 #define BLOCK_SIZE  512
-#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
+#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK + 2)
 
 struct ioreq {
     blkif_request_t     req;
@@ -67,10 +67,10 @@ struct ioreq {
     int                 postsync;
 
     /* grant mapping */
-    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
+    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
     int                 prot;
-    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    void                *page[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
     void                *pages;
 
     /* aio status */
@@ -128,7 +128,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
         ioreq = g_malloc0(sizeof(*ioreq));
         ioreq->blkdev = blkdev;
         blkdev->requests_total++;
-        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
     } else {
         /* get one from freelist */
         ioreq = QLIST_FIRST(&blkdev->freelist);
@@ -210,7 +210,7 @@ static int ioreq_parse(struct ioreq *ioreq)
 
     ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
     for (i = 0; i < ioreq->req.nr_segments; i++) {
-        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+        if (i == BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK) {
             xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
             goto err;
         }
-- 
1.7.2.5



