
[Xen-devel] [PATCH v4 5/8] ioreq-server: add support for multiple servers



The single ioreq server that was previously created on demand now
becomes the default server, and an API is added to allow secondary
servers, which handle specific IO ranges or PCI devices, to be
registered.

When the guest issues an IO, the list of secondary servers is checked
for a matching IO range or PCI device. If none is found, the IO is
passed to the default server.

Secondary servers use guest pages to communicate with emulators, in
the same way as the default server. These pages need to be in the
guest physmap, otherwise there is no suitable reference that an
emulator can query in order to map them. Therefore a pool of pages in
the current E820 reserved region, just below the special pages, is
used. Secondary servers allocate pages from this pool when they are
created and return them to it when they are destroyed.

The size of the pool is currently hardcoded in the domain build at a
value of 8. This should be sufficient for now; both the location and
size of the pool can be changed in future without any need to modify
the API.
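
For illustration only (not part of the patch): a minimal, untested
sketch of how a secondary emulator's control code might drive the new
libxc calls. The domid passed in, the port range and the BDF are
made-up values and most error handling is elided.

#include <xenctrl.h>

static int setup_secondary_server(xc_interface *xch, domid_t domid)
{
    ioservid_t id;
    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    evtchn_port_t bufioreq_port;
    int rc;

    rc = xc_hvm_create_ioreq_server(xch, domid, &id);
    if ( rc < 0 )
        return rc;

    /* Discover the gmfns (taken from the reserved pool) and the
       buffered ioreq event channel allocated for this server. */
    rc = xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                      &bufioreq_pfn, &bufioreq_port);
    if ( rc < 0 )
        goto fail;

    /* Claim an example port IO range and PCI device 00:05.0
       (bdf == 0x0028); both values are arbitrary. */
    rc = xc_hvm_map_io_range_to_ioreq_server(xch, domid, id,
                                             0 /* port IO */,
                                             0x1f0, 0x1f7);
    if ( rc < 0 )
        goto fail;

    rc = xc_hvm_map_pcidev_to_ioreq_server(xch, domid, id, 0x0028);
    if ( rc < 0 )
        goto fail;

    /* The emulator would now map ioreq_pfn and bufioreq_pfn (e.g.
       via xc_map_foreign_range()) and bind bufioreq_port before it
       starts servicing requests. */
    return 0;

 fail:
    xc_hvm_destroy_ioreq_server(xch, domid, id);
    return rc;
}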

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
---
 tools/libxc/xc_domain.c          |  175 +++++++
 tools/libxc/xc_domain_restore.c  |   27 +
 tools/libxc/xc_domain_save.c     |   24 +
 tools/libxc/xc_hvm_build_x86.c   |   30 +-
 tools/libxc/xenctrl.h            |   52 ++
 tools/libxc/xg_save_restore.h    |    2 +
 xen/arch/x86/hvm/hvm.c           | 1035 +++++++++++++++++++++++++++++++++++---
 xen/arch/x86/hvm/io.c            |    2 +-
 xen/include/asm-x86/hvm/domain.h |   34 +-
 xen/include/asm-x86/hvm/hvm.h    |    3 +-
 xen/include/asm-x86/hvm/io.h     |    2 +-
 xen/include/public/hvm/hvm_op.h  |   70 +++
 xen/include/public/hvm/ioreq.h   |    1 +
 xen/include/public/hvm/params.h  |    5 +-
 14 files changed, 1383 insertions(+), 79 deletions(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 369c3f3..8cec171 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1284,6 +1284,181 @@ int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long
     return rc;
 }
 
+int xc_hvm_create_ioreq_server(xc_interface *xch,
+                               domid_t domid,
+                               ioservid_t *id)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_create_ioreq_server_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    hypercall.op     = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_create_ioreq_server;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+    arg->domid = domid;
+    rc = do_xen_hypercall(xch, &hypercall);
+    *id = arg->id;
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
+
+int xc_hvm_get_ioreq_server_info(xc_interface *xch,
+                                 domid_t domid,
+                                 ioservid_t id,
+                                 xen_pfn_t *ioreq_pfn,
+                                 xen_pfn_t *bufioreq_pfn,
+                                 evtchn_port_t *bufioreq_port)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_get_ioreq_server_info_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    hypercall.op     = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_get_ioreq_server_info;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+    arg->domid = domid;
+    arg->id = id;
+    rc = do_xen_hypercall(xch, &hypercall);
+    if ( rc != 0 )
+        goto done;
+
+    if ( ioreq_pfn )
+        *ioreq_pfn = arg->ioreq_pfn;
+
+    if ( bufioreq_pfn )
+        *bufioreq_pfn = arg->bufioreq_pfn;
+
+    if ( bufioreq_port )
+        *bufioreq_port = arg->bufioreq_port;
+
+done:
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
+
+int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch, domid_t domid,
+                                        ioservid_t id, int is_mmio,
+                                        uint64_t start, uint64_t end)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_map_io_range_to_ioreq_server_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    hypercall.op     = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_map_io_range_to_ioreq_server;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+    arg->domid = domid;
+    arg->id = id;
+    arg->is_mmio = is_mmio;
+    arg->start = start;
+    arg->end = end;
+    rc = do_xen_hypercall(xch, &hypercall);
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
+
+int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
+                                            ioservid_t id, int is_mmio,
+                                            uint64_t start)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_unmap_io_range_from_ioreq_server_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    hypercall.op     = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_unmap_io_range_from_ioreq_server;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+    arg->domid = domid;
+    arg->id = id;
+    arg->is_mmio = is_mmio;
+    arg->start = start;
+    rc = do_xen_hypercall(xch, &hypercall);
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
+
+int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
+                                      ioservid_t id, uint16_t bdf)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_map_pcidev_to_ioreq_server_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    hypercall.op     = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_map_pcidev_to_ioreq_server;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+    arg->domid = domid;
+    arg->id = id;
+    arg->bdf = bdf;
+    rc = do_xen_hypercall(xch, &hypercall);
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
+
+int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
+                                          ioservid_t id, uint16_t bdf)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_unmap_pcidev_from_ioreq_server_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    hypercall.op     = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_unmap_pcidev_from_ioreq_server;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+    arg->domid = domid;
+    arg->id = id;
+    arg->bdf = bdf;
+    rc = do_xen_hypercall(xch, &hypercall);
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
+
+int xc_hvm_destroy_ioreq_server(xc_interface *xch,
+                                domid_t domid,
+                                ioservid_t id)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_destroy_ioreq_server_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    hypercall.op     = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_destroy_ioreq_server;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+    arg->domid = domid;
+    arg->id = id;
+    rc = do_xen_hypercall(xch, &hypercall);
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
+
 int xc_domain_setdebugging(xc_interface *xch,
                            uint32_t domid,
                            unsigned int enable)
diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
index bcb0ae0..7350b31 100644
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -740,6 +740,8 @@ typedef struct {
     uint64_t acpi_ioport_location;
     uint64_t viridian;
     uint64_t vm_generationid_addr;
+    uint64_t ioreq_server_pfn;
+    uint64_t nr_ioreq_server_pages;
 
     struct toolstack_data_t tdata;
 } pagebuf_t;
@@ -990,6 +992,26 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
         DPRINTF("read generation id buffer address");
         return pagebuf_get_one(xch, ctx, buf, fd, dom);
 
+    case XC_SAVE_ID_HVM_IOREQ_SERVER_PFN:
+        /* Skip padding 4 bytes then read the ioreq server gmfn base. */
+        if ( RDEXACT(fd, &buf->ioreq_server_pfn, sizeof(uint32_t)) ||
+             RDEXACT(fd, &buf->ioreq_server_pfn, sizeof(uint64_t)) )
+        {
+            PERROR("error read the ioreq server gmfn base");
+            return -1;
+        }
+        return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
+    case XC_SAVE_ID_HVM_NR_IOREQ_SERVER_PAGES:
+        /* Skip padding 4 bytes then read the ioreq server gmfn count. */
+        if ( RDEXACT(fd, &buf->nr_ioreq_server_pages, sizeof(uint32_t)) ||
+             RDEXACT(fd, &buf->nr_ioreq_server_pages, sizeof(uint64_t)) )
+        {
+            PERROR("error read the ioreq server gmfn count");
+            return -1;
+        }
+        return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
     default:
         if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
             ERROR("Max batch size exceeded (%d). Giving up.", count);
@@ -1748,6 +1770,11 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
     if (pagebuf.viridian != 0)
         xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
 
+    xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_SERVER_PFN,
+                     pagebuf.ioreq_server_pfn);
+    xc_set_hvm_param(xch, dom, HVM_PARAM_NR_IOREQ_SERVER_PAGES, 
+                     pagebuf.nr_ioreq_server_pages);
+
     if (pagebuf.acpi_ioport_location == 1) {
         DBGPRINTF("Use new firmware ioport from the checkpoint\n");
         xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION, 1);
diff --git a/tools/libxc/xc_domain_save.c b/tools/libxc/xc_domain_save.c
index 71f9b59..acf3685 100644
--- a/tools/libxc/xc_domain_save.c
+++ b/tools/libxc/xc_domain_save.c
@@ -1737,6 +1737,30 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t max_iter
             PERROR("Error when writing the viridian flag");
             goto out;
         }
+
+        chunk.id = XC_SAVE_ID_HVM_IOREQ_SERVER_PFN;
+        chunk.data = 0;
+        xc_get_hvm_param(xch, dom, HVM_PARAM_IOREQ_SERVER_PFN,
+                         (unsigned long *)&chunk.data);
+
+        if ( (chunk.data != 0) &&
+             wrexact(io_fd, &chunk, sizeof(chunk)) )
+        {
+            PERROR("Error when writing the ioreq server gmfn base");
+            goto out;
+        }
+
+        chunk.id = XC_SAVE_ID_HVM_NR_IOREQ_SERVER_PAGES;
+        chunk.data = 0;
+        xc_get_hvm_param(xch, dom, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
+                         (unsigned long *)&chunk.data);
+
+        if ( (chunk.data != 0) &&
+             wrexact(io_fd, &chunk, sizeof(chunk)) )
+        {
+            PERROR("Error when writing the ioreq server gmfn count");
+            goto out;
+        }
     }
 
     if ( callbacks != NULL && callbacks->toolstack_save != NULL )
diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
index dd3b522..3564e8b 100644
--- a/tools/libxc/xc_hvm_build_x86.c
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -49,6 +49,9 @@
 #define NR_SPECIAL_PAGES     8
 #define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
 
+#define NR_IOREQ_SERVER_PAGES 8
+#define ioreq_server_pfn(x) (special_pfn(0) - NR_IOREQ_SERVER_PAGES + (x))
+
 #define VGA_HOLE_SIZE (0x20)
 
 static int modules_init(struct xc_hvm_build_args *args,
@@ -114,7 +117,7 @@ static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
     /* Memory parameters. */
     hvm_info->low_mem_pgend = lowmem_end >> PAGE_SHIFT;
     hvm_info->high_mem_pgend = highmem_end >> PAGE_SHIFT;
-    hvm_info->reserved_mem_pgstart = special_pfn(0);
+    hvm_info->reserved_mem_pgstart = ioreq_server_pfn(0);
 
     /* Finish with the checksum. */
     for ( i = 0, sum = 0; i < hvm_info->length; i++ )
@@ -502,6 +505,31 @@ static int setup_guest(xc_interface *xch,
                      special_pfn(SPECIALPAGE_SHARING));
 
     /*
+     * Allocate and clear additional ioreq server pages. The default
+     * server will use the IOREQ and BUFIOREQ special pages above.
+     */
+    for ( i = 0; i < NR_IOREQ_SERVER_PAGES; i++ )
+    {
+        xen_pfn_t pfn = ioreq_server_pfn(i);
+
+        rc = xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &pfn);
+        if ( rc != 0 )
+        {
+            PERROR("Could not allocate %d'th ioreq server page.", i);
+            goto error_out;
+        }
+
+        if ( xc_clear_domain_page(xch, dom, pfn) )
+            goto error_out;
+    }
+
+    /* Tell the domain where the pages are and how many there are */
+    xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_SERVER_PFN,
+                     ioreq_server_pfn(0));
+    xc_set_hvm_param(xch, dom, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
+                     NR_IOREQ_SERVER_PAGES);
+
+    /*
      * Identity-map page table is required for running with CR0.PG=0 when
      * using Intel EPT. Create a 32-bit non-PAE page directory of superpages.
      */
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index e3a32f2..3b0c678 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1801,6 +1801,48 @@ void xc_clear_last_error(xc_interface *xch);
 int xc_set_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long value);
 int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long *value);
 
+/*
+ * IOREQ server API
+ */
+
+int xc_hvm_create_ioreq_server(xc_interface *xch,
+                               domid_t domid,
+                               ioservid_t *id);
+
+int xc_hvm_get_ioreq_server_info(xc_interface *xch,
+                                 domid_t domid,
+                                 ioservid_t id,
+                                 xen_pfn_t *ioreq_pfn,
+                                 xen_pfn_t *bufioreq_pfn,
+                                 evtchn_port_t *bufioreq_port);
+
+int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch,
+                                        domid_t domid,
+                                        ioservid_t id,
+                                        int is_mmio,
+                                        uint64_t start,
+                                        uint64_t end);
+
+int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch,
+                                            domid_t domid,
+                                            ioservid_t id,
+                                            int is_mmio,
+                                            uint64_t start);
+
+int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch,
+                                      domid_t domid,
+                                      ioservid_t id,
+                                      uint16_t bdf);
+
+int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch,
+                                          domid_t domid,
+                                          ioservid_t id,
+                                          uint16_t bdf);
+
+int xc_hvm_destroy_ioreq_server(xc_interface *xch,
+                                domid_t domid,
+                                ioservid_t id);
+
 /* HVM guest pass-through */
 int xc_assign_device(xc_interface *xch,
                      uint32_t domid,
@@ -2425,3 +2467,13 @@ int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
 int xc_kexec_unload(xc_interface *xch, int type);
 
 #endif /* XENCTRL_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libxc/xg_save_restore.h b/tools/libxc/xg_save_restore.h
index f859621..f1ec7f5 100644
--- a/tools/libxc/xg_save_restore.h
+++ b/tools/libxc/xg_save_restore.h
@@ -259,6 +259,8 @@
 #define XC_SAVE_ID_HVM_ACCESS_RING_PFN  -16
 #define XC_SAVE_ID_HVM_SHARING_RING_PFN -17
 #define XC_SAVE_ID_TOOLSTACK          -18 /* Optional toolstack specific info */
+#define XC_SAVE_ID_HVM_IOREQ_SERVER_PFN -19
+#define XC_SAVE_ID_HVM_NR_IOREQ_SERVER_PAGES -20
 
 /*
 ** We process save/restore/migrate in batches of pages; the below
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4ecbede..5af01b0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -369,28 +369,39 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
 
 bool_t hvm_io_pending(struct vcpu *v)
 {
-    struct hvm_ioreq_server *s = v->domain->arch.hvm_domain.ioreq_server;
-    ioreq_t *p;
-
-    if ( !s )
-        return 0;
+    struct domain *d = v->domain;
+    struct list_head *entry;
+ 
+    list_for_each ( entry, &d->arch.hvm_domain.ioreq_server_list )
+    {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
+        ioreq_t *p;
+
+        p = get_ioreq(s, v);
+        if ( p->state != STATE_IOREQ_NONE )
+            return 1;
+    }
 
-    p = get_ioreq(s, v);
-    return ( p->state != STATE_IOREQ_NONE );
+    return 0;
 }
 
 void hvm_do_resume(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct list_head *entry;
 
     check_wakeup_from_wait();
 
     if ( is_hvm_vcpu(v) )
         pt_restore_timer(v);
 
-    if ( s )
+    list_for_each ( entry, &d->arch.hvm_domain.ioreq_server_list )
     {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
         ioreq_t *p = get_ioreq(s, v);
 
         while ( p->state != STATE_IOREQ_NONE )
@@ -408,7 +419,8 @@ void hvm_do_resume(struct vcpu *v)
                                           (p->state != STATE_IOREQ_INPROCESS));
                 break;
             default:
-                gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
+                gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n",
+                         p->state);
                 domain_crash(d);
                 return; /* bail */
             }
@@ -423,6 +435,31 @@ void hvm_do_resume(struct vcpu *v)
     }
 }
 
+static int hvm_alloc_ioreq_gmfn(struct domain *d, unsigned long *gmfn)
+{
+    unsigned int i;
+    int rc;
+
+    rc = -ENOMEM;
+    for ( i = 0; i < d->arch.hvm_domain.ioreq_gmfn_count; i++ )
+    {
+        if ( !test_and_set_bit(i, &d->arch.hvm_domain.ioreq_gmfn_mask) ) {
+            *gmfn = d->arch.hvm_domain.ioreq_gmfn_base + i;
+            rc = 0;
+            break;
+        }
+    }
+
+    return rc;
+}
+
+static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
+{
+    unsigned int i = gmfn - d->arch.hvm_domain.ioreq_gmfn_base;
+
+    clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn_mask);
+}
+
 void destroy_ring_for_helper(
     void **_va, struct page_info *page)
 {
@@ -503,6 +540,7 @@ static int hvm_map_ioreq_page(
 
     iorp->va = va;
     iorp->page = page;
+    iorp->gmfn = gmfn;
 
     return 0;
 }
@@ -533,6 +571,83 @@ static int hvm_print_line(
     return X86EMUL_OKAY;
 }
 
+static int hvm_access_cf8(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
+{
+    struct vcpu *curr = current;
+    struct hvm_domain *hd = &curr->domain->arch.hvm_domain;
+    int rc;
+
+    BUG_ON(port < 0xcf8);
+    port -= 0xcf8;
+
+    spin_lock(&hd->pci_lock);
+
+    if ( dir == IOREQ_WRITE )
+    {
+        switch ( bytes )
+        {
+        case 4:
+            hd->pci_cf8 = *val;
+            break;
+
+        case 2:
+        {
+            uint32_t mask = 0xffff << (port * 8);
+            uint32_t subval = *val << (port * 8);
+
+            hd->pci_cf8 = (hd->pci_cf8 & ~mask) |
+                          (subval & mask);
+            break;
+        }
+            
+        case 1:
+        {
+            uint32_t mask = 0xff << (port * 8);
+            uint32_t subval = *val << (port * 8);
+
+            hd->pci_cf8 = (hd->pci_cf8 & ~mask) |
+                          (subval & mask);
+            break;
+        }
+
+        default:
+            break;
+        }
+
+        /* We always need to fall through to the catch all emulator */
+        rc = X86EMUL_UNHANDLEABLE;
+    }
+    else
+    {
+        switch ( bytes )
+        {
+        case 4:
+            *val = hd->pci_cf8;
+            rc = X86EMUL_OKAY;
+            break;
+
+        case 2:
+            *val = (hd->pci_cf8 >> (port * 8)) & 0xffff;
+            rc = X86EMUL_OKAY;
+            break;
+            
+        case 1:
+            *val = (hd->pci_cf8 >> (port * 8)) & 0xff;
+            rc = X86EMUL_OKAY;
+            break;
+
+        default:
+            rc = X86EMUL_UNHANDLEABLE;
+            break;
+        }
+    }
+
+    spin_unlock(&hd->pci_lock);
+
+    return rc;
+}
+
 static int handle_pvh_io(
     int dir, uint32_t port, uint32_t bytes, uint32_t *val)
 {
@@ -561,7 +676,7 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
-                                     struct vcpu *v)
+                                     bool_t is_default, struct vcpu *v)
 {
     struct hvm_ioreq_vcpu *sv;
     int rc;
@@ -589,8 +704,9 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
             goto fail3;
 
         s->bufioreq_evtchn = rc;
-        d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
-            s->bufioreq_evtchn;
+        if ( is_default )
+            d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
+                s->bufioreq_evtchn;
     }
 
     sv->vcpu = v;
@@ -669,57 +785,90 @@ static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
     spin_unlock(&s->lock);
 }
 
-static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s)
+static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
+                                      bool_t is_default)
 {
     struct domain *d = s->domain;
-    unsigned long pfn;
+    unsigned long ioreq_pfn, bufioreq_pfn;
     int rc;
 
-    pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
-    rc = hvm_map_ioreq_page(d, &s->ioreq, pfn);
+    if ( is_default ) {
+        ioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+        bufioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
+    } else {
+        rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
+        if ( rc )
+            goto fail1;
+
+        rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
+        if ( rc )
+            goto fail2;
+    }
+
+    rc = hvm_map_ioreq_page(d, &s->ioreq, ioreq_pfn);
     if ( rc )
-        goto fail1;
+        goto fail3;
 
-    pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
-    rc = hvm_map_ioreq_page(d, &s->bufioreq, pfn);
+    rc = hvm_map_ioreq_page(d, &s->bufioreq, bufioreq_pfn);
     if ( rc )
-        goto fail2;
+        goto fail4;
 
     return 0;
 
-fail2:
+fail4:
     hvm_unmap_ioreq_page(&s->ioreq);
 
+fail3:
+    if ( !is_default )
+        hvm_free_ioreq_gmfn(d, bufioreq_pfn);
+
+fail2:
+    if ( !is_default )
+        hvm_free_ioreq_gmfn(d, ioreq_pfn);
+
 fail1:
     return rc;
 }
 
-static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
+static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s, 
+                                         bool_t is_default)
 {
+    struct domain *d = s->domain;
+
     hvm_unmap_ioreq_page(&s->bufioreq);
     hvm_unmap_ioreq_page(&s->ioreq);
+
+    if ( !is_default ) {
+        hvm_free_ioreq_gmfn(d, s->bufioreq.gmfn);
+        hvm_free_ioreq_gmfn(d, s->ioreq.gmfn);
+    }
 }
 
 static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
-                                 domid_t domid)
+                                 domid_t domid, bool_t is_default,
+                                 ioservid_t id)
 {
     struct vcpu *v;
     int rc;
 
+    s->id = id;
     s->domain = d;
     s->domid = domid;
+    INIT_LIST_HEAD(&s->mmio_range_list);
+    INIT_LIST_HEAD(&s->portio_range_list);
+    INIT_LIST_HEAD(&s->pcidev_list);
 
     spin_lock_init(&s->lock);
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
-    rc = hvm_ioreq_server_map_pages(s);
+    rc = hvm_ioreq_server_map_pages(s, is_default);
     if ( rc )
         return rc;
 
     for_each_vcpu ( d, v )
     {
-        rc = hvm_ioreq_server_add_vcpu(s, v);
+        rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
         if ( rc )
             goto fail;
     }
@@ -728,18 +877,53 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
 
  fail:
     hvm_ioreq_server_remove_all_vcpus(s);
-    hvm_ioreq_server_unmap_pages(s);
+    hvm_ioreq_server_unmap_pages(s, is_default);
 
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
     return rc;
 }
 
-static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
+static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
+                                    bool_t is_default)
 {
+    struct list_head *entry;
+
+    list_for_each ( entry,
+                    &s->mmio_range_list )
+    {
+        struct hvm_io_range *x = list_entry(entry,
+                                            struct hvm_io_range,
+                                            list_entry);
+
+        xfree(x);
+    }
+
+    list_for_each ( entry,
+                    &s->portio_range_list )
+    {
+        struct hvm_io_range *x = list_entry(entry,
+                                            struct hvm_io_range,
+                                            list_entry);
+
+        xfree(x);
+    }
+
+    list_for_each ( entry,
+                    &s->pcidev_list )
+    {
+        struct hvm_pcidev *x = list_entry(entry,
+                                          struct hvm_pcidev,
+                                          list_entry);
+
+        xfree(x);
+    }
+
     hvm_ioreq_server_remove_all_vcpus(s);
-    hvm_ioreq_server_unmap_pages(s);
+    hvm_ioreq_server_unmap_pages(s, is_default);
 }
 
-static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
+static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
+                                   bool_t is_default, ioservid_t *id)
 {
     struct hvm_ioreq_server *s;
     int rc;
@@ -747,7 +931,7 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
     spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
 
     rc = -EEXIST;
-    if ( d->arch.hvm_domain.ioreq_server != NULL )
+    if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
         goto fail1;
  
     rc = -ENOMEM;
@@ -757,14 +941,23 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
 
     domain_pause(d);
 
-    rc = hvm_ioreq_server_init(s, d, domid);
+    rc = hvm_ioreq_server_init(s, d, domid, is_default,
+                               d->arch.hvm_domain.ioreq_server_id++);
     if ( rc )
         goto fail3;
 
-    d->arch.hvm_domain.ioreq_server = s;
+    list_add(&s->list_entry,
+             &d->arch.hvm_domain.ioreq_server_list);
+    d->arch.hvm_domain.ioreq_server_count++;
+
+    if ( is_default )
+        d->arch.hvm_domain.default_ioreq_server = s;
 
     domain_unpause(d);
 
+    if (id != NULL)
+        *id = s->id;
+
     spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
     return 0;
 
@@ -779,27 +972,363 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
     return rc;
 }
 
-static void hvm_destroy_ioreq_server(struct domain *d)
+static void hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
+{
+    struct list_head *entry;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    list_for_each ( entry,
+                    &d->arch.hvm_domain.ioreq_server_list )
+    {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
+        bool_t is_default = ( s == d->arch.hvm_domain.default_ioreq_server);
+
+        if ( s->id != id )
+            continue;
+
+        domain_pause(d);
+
+        if ( is_default )
+            d->arch.hvm_domain.default_ioreq_server = NULL;
+
+        --d->arch.hvm_domain.ioreq_server_count;
+        list_del_init(&s->list_entry);
+        
+        hvm_ioreq_server_deinit(s, is_default);
+
+        domain_unpause(d);
+
+        xfree(s);
+
+        break;
+    }
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+}
+
+static int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
+                                     unsigned long *ioreq_pfn,
+                                     unsigned long *bufioreq_pfn,
+                                     evtchn_port_t *bufioreq_port)
+{
+    struct list_head *entry;
+    int rc;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    rc = -ENOENT;
+    list_for_each ( entry,
+                    &d->arch.hvm_domain.ioreq_server_list )
+    {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
+
+        if ( s->id != id )
+            continue;
+
+        *ioreq_pfn = s->ioreq.gmfn;
+        *bufioreq_pfn = s->bufioreq.gmfn;
+        *bufioreq_port = s->bufioreq_evtchn;
+
+        rc = 0;
+        break;
+    }
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    return rc;
+}
+
+static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
+                                            int is_mmio, uint64_t start,
+                                            uint64_t end)
 {
     struct hvm_ioreq_server *s;
+    struct hvm_io_range *x;
+    struct list_head *list;
+    int rc;
+
+    x = xmalloc(struct hvm_io_range);
+    if ( x == NULL )
+        return -ENOMEM;
 
     spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
 
-    s = d->arch.hvm_domain.ioreq_server;
-    if ( !s )
-        goto done;
+    rc = -ENOENT;
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server_list,
+                          list_entry )
+    {
+        if ( s->id == id )
+            goto found;
+    }
 
-    d->arch.hvm_domain.ioreq_server = NULL;
+    goto fail;
 
-    domain_pause(d);
+ found:
+    INIT_RCU_HEAD(&x->rcu);
+    x->start = start;
+    x->end = end;
 
-    hvm_ioreq_server_deinit(s);
+    list = ( is_mmio ) ? &s->mmio_range_list : &s->portio_range_list;
+    list_add_rcu(&x->list_entry, list);
 
-    domain_unpause(d);
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
 
-    xfree(s);
+    return 0;
+
+ fail:
+    xfree(x);
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    return rc;
+}
+
+static void free_io_range(struct rcu_head *rcu)
+{
+    struct hvm_io_range *x;
+
+    x = container_of (rcu, struct hvm_io_range, rcu);
+
+    xfree(x);
+}
+
+static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, 
+                                                ioservid_t id,
+                                                int is_mmio, uint64_t start)
+{
+    struct hvm_ioreq_server *s;
+    struct list_head *list, *entry;
+    int rc;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    rc = -ENOENT;
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server_list,
+                          list_entry )
+    {
+        if ( s->id == id )
+            goto found;
+    }
+
+    goto done;
+
+ found:
+    list = ( is_mmio ) ? &s->mmio_range_list : &s->portio_range_list;
+
+    list_for_each ( entry,
+                    list )
+    {
+        struct hvm_io_range *x = list_entry(entry,
+                                            struct hvm_io_range,
+                                            list_entry);
+
+        if ( start == x->start )
+        {
+            list_del_rcu(&x->list_entry);
+            call_rcu(&x->rcu, free_io_range);
+
+            rc = 0;
+            break;
+        }
+    }
+
+ done:
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    return rc;
+}
+
+static int hvm_map_pcidev_to_ioreq_server(struct domain *d, ioservid_t id,
+                                          uint16_t bdf)
+{
+    struct hvm_ioreq_server *s;
+    struct hvm_pcidev *x;
+    int rc;
+
+    x = xmalloc(struct hvm_pcidev);
+    if ( x == NULL )
+        return -ENOMEM;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    rc = -ENOENT;
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server_list,
+                          list_entry )
+    {
+        if ( s->id == id )
+            goto found;
+    }
+
+    goto fail;
+
+ found:
+    INIT_RCU_HEAD(&x->rcu);
+    x->bdf = bdf;
+
+    list_add_rcu(&x->list_entry, &s->pcidev_list);
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    return 0;
+
+ fail:
+    xfree(x);
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    return rc;
+}
+
+static void free_pcidev(struct rcu_head *rcu)
+{
+    struct hvm_pcidev *x;
+
+    x = container_of (rcu, struct hvm_pcidev, rcu);
+
+    xfree(x);
+}
+
+static int hvm_unmap_pcidev_from_ioreq_server(struct domain *d, ioservid_t id,
+                                              uint16_t bdf)
+{
+    struct hvm_ioreq_server *s;
+    struct list_head *entry;
+    int rc;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    rc = -ENOENT;
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server_list,
+                          list_entry )
+    {
+        if ( s->id == id )
+            goto found;
+    }
+
+    goto done;
+
+ found:
+    list_for_each ( entry,
+                    &s->pcidev_list )
+    {
+        struct hvm_pcidev *x = list_entry(entry,
+                                          struct hvm_pcidev,
+                                          list_entry);
+
+        if ( bdf == x->bdf )
+        {
+            list_del_rcu(&x->list_entry);
+            call_rcu(&x->rcu, free_pcidev);
+
+            rc = 0;
+            break;
+        }
+    }
+
+ done:
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    return rc;
+}
+
+static int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
+{
+    struct list_head *entry;
+    int rc;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    list_for_each ( entry,
+                    &d->arch.hvm_domain.ioreq_server_list )
+    {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
+        bool_t is_default = ( s == d->arch.hvm_domain.default_ioreq_server);
+
+        rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+        if ( rc )
+            goto fail;
+    }
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    return 0;
+
+ fail:
+    list_for_each ( entry,
+                    &d->arch.hvm_domain.ioreq_server_list )
+    {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
+
+        hvm_ioreq_server_remove_vcpu(s, v);
+    }
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    return rc;
+}
+
+static void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
+{
+    struct list_head *entry;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    list_for_each ( entry,
+                    &d->arch.hvm_domain.ioreq_server_list )
+    {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
+
+        hvm_ioreq_server_remove_vcpu(s, v);
+    }
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+}
+
+static void hvm_destroy_all_ioreq_servers(struct domain *d)
+{
+    struct list_head *entry, *next;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+    list_for_each_safe ( entry,
+                         next,
+                         &d->arch.hvm_domain.ioreq_server_list )
+    {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
+        bool_t is_default = ( s == d->arch.hvm_domain.default_ioreq_server);
+
+        domain_pause(d);
+
+        if ( is_default )
+            d->arch.hvm_domain.default_ioreq_server = NULL;
+
+        --d->arch.hvm_domain.ioreq_server_count;
+        list_del_init(&s->list_entry);
+        
+        hvm_ioreq_server_deinit(s, is_default);
+
+        domain_unpause(d);
+
+        xfree(s);
+    }
 
- done:
     spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
 }
 
@@ -812,7 +1341,7 @@ static int hvm_set_ioreq_pfn(struct domain *d, bool_t buf,
 
     spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
 
-    s = d->arch.hvm_domain.ioreq_server;
+    s = d->arch.hvm_domain.default_ioreq_server;
     if ( !s )
         goto done;
 
@@ -870,7 +1399,7 @@ static int hvm_set_dm_domain(struct domain *d, domid_t domid)
 
     spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
 
-    s = d->arch.hvm_domain.ioreq_server;
+    s = d->arch.hvm_domain.default_ioreq_server;
     if ( !s )
         goto done;
 
@@ -943,6 +1472,8 @@ int hvm_domain_initialise(struct domain *d)
     }
 
     spin_lock_init(&d->arch.hvm_domain.ioreq_server_lock);
+    INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server_list);
+    spin_lock_init(&d->arch.hvm_domain.pci_lock);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
     spin_lock_init(&d->arch.hvm_domain.uc_lock);
 
@@ -984,6 +1515,7 @@ int hvm_domain_initialise(struct domain *d)
     rtc_init(d);
 
     register_portio_handler(d, 0xe9, 1, hvm_print_line);
+    register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
 
     rc = hvm_funcs.domain_initialise(d);
     if ( rc != 0 )
@@ -1014,7 +1546,7 @@ void hvm_domain_relinquish_resources(struct domain *d)
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         hvm_funcs.nhvm_domain_relinquish_resources(d);
 
-    hvm_destroy_ioreq_server(d);
+    hvm_destroy_all_ioreq_servers(d);
 
     msixtbl_pt_cleanup(d);
 
@@ -1647,7 +2179,6 @@ int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
     struct domain *d = v->domain;
-    struct hvm_ioreq_server *s;
 
     hvm_asid_flush_vcpu(v);
 
@@ -1690,14 +2221,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
          && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
         goto fail5;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
-
-    s = d->arch.hvm_domain.ioreq_server;
-    if ( s )
-        rc = hvm_ioreq_server_add_vcpu(s, v);
-
-    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
-
+    rc = hvm_all_ioreq_servers_add_vcpu(d, v);
     if ( rc != 0 )
         goto fail6;
 
@@ -1734,15 +2258,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
 void hvm_vcpu_destroy(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    struct hvm_ioreq_server *s;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
-
-    s = d->arch.hvm_domain.ioreq_server;
-    if ( s )
-        hvm_ioreq_server_remove_vcpu(s, v);
-
-    spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+    hvm_all_ioreq_servers_remove_vcpu(d, v);
 
     nestedhvm_vcpu_destroy(v);
 
@@ -1781,9 +2298,105 @@ void hvm_vcpu_down(struct vcpu *v)
     }
 }
 
-int hvm_buffered_io_send(struct domain *d, const ioreq_t *p)
+static DEFINE_RCU_READ_LOCK(ioreq_server_rcu_lock);
+
+static struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
+                                                        ioreq_t *p)
+{
+#define BDF(cf8) (((cf8) & 0x00ffff00) >> 8)
+
+    struct hvm_ioreq_server *s;
+    uint8_t type;
+    uint64_t addr;
+
+    if ( d->arch.hvm_domain.ioreq_server_count == 1 &&
+         d->arch.hvm_domain.default_ioreq_server )
+        goto done;
+
+    if ( p->type == IOREQ_TYPE_PIO &&
+         (p->addr & ~3) == 0xcfc )
+    { 
+        /* PCI config data cycle */
+        type = IOREQ_TYPE_PCI_CONFIG;
+
+        spin_lock(&d->arch.hvm_domain.pci_lock);
+        addr = d->arch.hvm_domain.pci_cf8 + (p->addr & 3);
+        spin_unlock(&d->arch.hvm_domain.pci_lock);
+    }
+    else
+    {
+        type = p->type;
+        addr = p->addr;
+    }
+
+    rcu_read_lock(&ioreq_server_rcu_lock);
+
+    switch ( type )
+    {
+    case IOREQ_TYPE_COPY:
+    case IOREQ_TYPE_PIO:
+    case IOREQ_TYPE_PCI_CONFIG:
+        break;
+    default:
+        goto done;
+    }
+
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server_list,
+                          list_entry )
+    {
+        switch ( type )
+        {
+            case IOREQ_TYPE_COPY:
+            case IOREQ_TYPE_PIO: {
+                struct list_head *list;
+                struct hvm_io_range *x;
+
+                list = ( type == IOREQ_TYPE_COPY ) ?
+                    &s->mmio_range_list :
+                    &s->portio_range_list;
+
+                list_for_each_entry ( x,
+                                      list,
+                                      list_entry )
+                {
+                    if ( (addr >= x->start) && (addr <= x->end) )
+                        goto found;
+                }
+                break;
+            }
+            case IOREQ_TYPE_PCI_CONFIG: {
+                struct hvm_pcidev *x;
+
+                list_for_each_entry ( x,
+                                      &s->pcidev_list,
+                                      list_entry )
+                {
+                    if ( BDF(addr) == x->bdf ) {
+                        p->type = type;
+                        p->addr = addr;
+                        goto found;
+                    }
+                }
+                break;
+            }
+        }
+    }
+
+ done:
+    s = d->arch.hvm_domain.default_ioreq_server;
+
+ found:
+    rcu_read_unlock(&ioreq_server_rcu_lock);
+
+    return s;
+
+#undef BDF
+}
+
+int hvm_buffered_io_send(struct domain *d, ioreq_t *p)
 {
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+    struct hvm_ioreq_server *s = hvm_select_ioreq_server(d, p);
     struct hvm_ioreq_page *iorp;
     buffered_iopage_t *pg;
     buf_ioreq_t bp;
@@ -1865,21 +2478,19 @@ int hvm_buffered_io_send(struct domain *d, const ioreq_t *p)
 
 bool_t hvm_has_dm(struct domain *d)
 {
-    return !!d->arch.hvm_domain.ioreq_server;
+    return !list_empty(&d->arch.hvm_domain.ioreq_server_list);
 }
 
-bool_t hvm_send_assist_req(struct vcpu *v, const ioreq_t *proto_p)
+bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
+                                           struct vcpu *v,
+                                           ioreq_t *proto_p)
 {
     struct domain *d = v->domain;
-    struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
     ioreq_t *p;
 
     if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
         return 0; /* implicitly bins the i/o operation */
 
-    if ( !s )
-        return 0;
-
     p = get_ioreq(s, v);
 
     if ( unlikely(p->state != STATE_IOREQ_NONE) )
@@ -1911,6 +2522,33 @@ bool_t hvm_send_assist_req(struct vcpu *v, const ioreq_t *proto_p)
     return 1;
 }
 
+bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *p)
+{
+    struct domain *d = v->domain;
+    struct hvm_ioreq_server *s = hvm_select_ioreq_server(d, p);
+
+    if ( !s )
+        return 0;
+
+    return hvm_send_assist_req_to_ioreq_server(s, v, p);
+}
+
+void hvm_broadcast_assist_req(struct vcpu *v, ioreq_t *p)
+{
+    struct domain *d = v->domain;
+    struct list_head *entry;
+
+    list_for_each ( entry,
+                    &d->arch.hvm_domain.ioreq_server_list )
+    {
+        struct hvm_ioreq_server *s = list_entry(entry,
+                                                struct hvm_ioreq_server,
+                                                list_entry);
+
+        (void) hvm_send_assist_req_to_ioreq_server(s, v, p);
+    }
+}
+
 void hvm_hlt(unsigned long rflags)
 {
     struct vcpu *curr = current;
@@ -4524,6 +5162,195 @@ static int hvmop_flush_tlb_all(void)
     return 0;
 }
 
+static int hvmop_create_ioreq_server(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_create_ioreq_server_t) uop)
+{
+    struct domain *curr_d = current->domain;
+    xen_hvm_create_ioreq_server_t op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0, &op.id);
+    if ( rc != 0 )
+        goto out;
+
+    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
+    
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
+static int hvmop_get_ioreq_server_info(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_ioreq_server_info_t) uop)
+{
+    xen_hvm_get_ioreq_server_info_t op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    if ( (rc = hvm_get_ioreq_server_info(d, op.id,
+                                         &op.ioreq_pfn,
+                                         &op.bufioreq_pfn, 
+                                         &op.bufioreq_port)) < 0 )
+        goto out;
+
+    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
+    
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
+static int hvmop_map_io_range_to_ioreq_server(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_map_io_range_to_ioreq_server_t) uop)
+{
+    xen_hvm_map_io_range_to_ioreq_server_t op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = hvm_map_io_range_to_ioreq_server(d, op.id, op.is_mmio,
+                                          op.start, op.end);
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
+static int hvmop_unmap_io_range_from_ioreq_server(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_unmap_io_range_from_ioreq_server_t) uop)
+{
+    xen_hvm_unmap_io_range_from_ioreq_server_t op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = hvm_unmap_io_range_from_ioreq_server(d, op.id, op.is_mmio,
+                                              op.start);
+    
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
+static int hvmop_map_pcidev_to_ioreq_server(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_map_pcidev_to_ioreq_server_t) uop)
+{
+    xen_hvm_map_pcidev_to_ioreq_server_t op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = hvm_map_pcidev_to_ioreq_server(d, op.id, op.bdf);
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
+static int hvmop_unmap_pcidev_from_ioreq_server(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_unmap_pcidev_from_ioreq_server_t) uop)
+{
+    xen_hvm_unmap_pcidev_from_ioreq_server_t op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = hvm_unmap_pcidev_from_ioreq_server(d, op.id, op.bdf);
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
+static int hvmop_destroy_ioreq_server(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
+{
+    xen_hvm_destroy_ioreq_server_t op;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    hvm_destroy_ioreq_server(d, op.id);
+    rc = 0;
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
 long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
 {
@@ -4532,6 +5359,41 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
     switch ( op )
     {
+    case HVMOP_create_ioreq_server:
+        rc = hvmop_create_ioreq_server(
+            guest_handle_cast(arg, xen_hvm_create_ioreq_server_t));
+        break;
+    
+    case HVMOP_get_ioreq_server_info:
+        rc = hvmop_get_ioreq_server_info(
+            guest_handle_cast(arg, xen_hvm_get_ioreq_server_info_t));
+        break;
+    
+    case HVMOP_map_io_range_to_ioreq_server:
+        rc = hvmop_map_io_range_to_ioreq_server(
+            guest_handle_cast(arg, xen_hvm_map_io_range_to_ioreq_server_t));
+        break;
+    
+    case HVMOP_unmap_io_range_from_ioreq_server:
+        rc = hvmop_unmap_io_range_from_ioreq_server(
+            guest_handle_cast(arg, xen_hvm_unmap_io_range_from_ioreq_server_t));
+        break;
+    
+    case HVMOP_map_pcidev_to_ioreq_server:
+        rc = hvmop_map_pcidev_to_ioreq_server(
+            guest_handle_cast(arg, xen_hvm_map_pcidev_to_ioreq_server_t));
+        break;
+    
+    case HVMOP_unmap_pcidev_from_ioreq_server:
+        rc = hvmop_unmap_pcidev_from_ioreq_server(
+            guest_handle_cast(arg, xen_hvm_unmap_pcidev_from_ioreq_server_t));
+        break;
+    
+    case HVMOP_destroy_ioreq_server:
+        rc = hvmop_destroy_ioreq_server(
+            guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
+        break;
+    
     case HVMOP_set_param:
     case HVMOP_get_param:
     {
@@ -4626,7 +5488,9 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                 if ( a.value == DOMID_SELF )
                     a.value = curr_d->domain_id;
 
-                rc = hvm_set_dm_domain(d, a.value);
+                rc = hvm_create_ioreq_server(d, a.value, 1, NULL);
+                if ( rc == -EEXIST )
+                    rc = hvm_set_dm_domain(d, a.value);
                 break;
             case HVM_PARAM_ACPI_S_STATE:
                 /* Not reflexive, as we must domain_pause(). */
@@ -4691,6 +5555,25 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                 if ( a.value > SHUTDOWN_MAX )
                     rc = -EINVAL;
                 break;
+            case HVM_PARAM_IOREQ_SERVER_PFN:
+                if ( d == current->domain ) {
+                    rc = -EPERM;
+                    break;
+                }
+                d->arch.hvm_domain.ioreq_gmfn_base = a.value;
+                break;
+            case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
+                if ( d == current->domain ) {
+                    rc = -EPERM;
+                    break;
+                }
+                if ( a.value == 0 ||
+                     a.value > sizeof(unsigned long) * 8 ) {
+                    rc = -EINVAL;
+                    break;
+                }
+                d->arch.hvm_domain.ioreq_gmfn_count = a.value;
+                break;
             }
 
             if ( rc == 0 ) 
@@ -4724,6 +5607,12 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             case HVM_PARAM_ACPI_S_STATE:
                 a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
                 break;
+            case HVM_PARAM_IOREQ_SERVER_PFN:
+            case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
+                if ( d == current->domain ) {
+                    rc = -EPERM;
+                    break;
+                }
             case HVM_PARAM_IOREQ_PFN:
             case HVM_PARAM_BUFIOREQ_PFN:
             case HVM_PARAM_BUFIOREQ_EVTCHN: {
@@ -4731,7 +5620,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                 
                 /* May need to create server */
                 domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
-                rc = hvm_create_ioreq_server(d, domid);
+                rc = hvm_create_ioreq_server(d, domid, 1, NULL);
                 if ( rc != 0 && rc != -EEXIST )
                     goto param_fail;
                 /*FALLTHRU*/
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 8db300d..8461cc3 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -74,7 +74,7 @@ void send_invalidate_req(void)
         .data = ~0UL, /* flush all */
     };
 
-    (void)hvm_send_assist_req(current, &p);
+    hvm_broadcast_assist_req(current, &p);
 }
 
 int handle_mmio(void)
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index b6911f9..d7a73ce 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -36,10 +36,23 @@
 #include <public/hvm/save.h>
 
 struct hvm_ioreq_page {
+    unsigned long gmfn;
     struct page_info *page;
     void *va;
 };
 
+struct hvm_io_range {
+    struct list_head    list_entry;
+    uint64_t            start, end;
+    struct rcu_head     rcu;
+};     
+
+struct hvm_pcidev {
+    struct list_head    list_entry;
+    uint16_t            bdf;
+    struct rcu_head     rcu;
+};     
+
 struct hvm_ioreq_vcpu {
     struct list_head list_entry;
     struct vcpu      *vcpu;
@@ -47,6 +60,9 @@ struct hvm_ioreq_vcpu {
 };
 
 struct hvm_ioreq_server {
+    struct list_head       list_entry;
+    ioservid_t             id;
+
     /* Lock to serialize toolstack modifications */
     spinlock_t             lock;
     struct domain          *domain;
@@ -60,11 +76,27 @@ struct hvm_ioreq_server {
     /* Lock to serialize access to buffered ioreq ring */
     spinlock_t             bufioreq_lock;
     evtchn_port_t          bufioreq_evtchn;
+    struct list_head       mmio_range_list;
+    struct list_head       portio_range_list;
+    struct list_head       pcidev_list;
 };
 
 struct hvm_domain {
+    /* Guest page range used for non-default ioreq servers */
+    unsigned long           ioreq_gmfn_base;
+    unsigned int            ioreq_gmfn_count;
+    unsigned long           ioreq_gmfn_mask;
+
+    /* Lock protects all other values in the following block */
     spinlock_t              ioreq_server_lock;
-    struct hvm_ioreq_server *ioreq_server;
+    ioservid_t              ioreq_server_id;
+    struct list_head        ioreq_server_list;
+    unsigned int            ioreq_server_count;
+    struct hvm_ioreq_server *default_ioreq_server;
+
+    /* Cached CF8 for guest PCI config cycles */
+    uint32_t                pci_cf8;
+    spinlock_t              pci_lock;
 
     struct pl_time         pl_time;
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 08a62ea..6c4530e 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -228,7 +228,8 @@ int prepare_ring_for_helper(struct domain *d, unsigned long gmfn,
                             struct page_info **_page, void **_va);
 void destroy_ring_for_helper(void **_va, struct page_info *page);
 
-bool_t hvm_send_assist_req(struct vcpu *v, const ioreq_t *p);
+bool_t hvm_send_assist_req(struct vcpu *v, ioreq_t *p);
+void hvm_broadcast_assist_req(struct vcpu *v, ioreq_t *p);
 
 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
 int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index bfd28c2..be6546d 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -92,7 +92,7 @@ static inline int hvm_buffered_io_intercept(ioreq_t *p)
 }
 
 int hvm_mmio_intercept(ioreq_t *p);
-int hvm_buffered_io_send(struct domain *d, const ioreq_t *p);
+int hvm_buffered_io_send(struct domain *d, ioreq_t *p);
 
 static inline void register_portio_handler(
     struct domain *d, unsigned long addr,
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index a9aab4b..c6ceea5 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -23,6 +23,7 @@
 
 #include "../xen.h"
 #include "../trace.h"
+#include "../event_channel.h"
 
 /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
 #define HVMOP_set_param           0
@@ -270,6 +271,75 @@ struct xen_hvm_inject_msi {
 typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
 
+typedef uint32_t ioservid_t;
+
+DEFINE_XEN_GUEST_HANDLE(ioservid_t);
+
+#define HVMOP_create_ioreq_server 17
+struct xen_hvm_create_ioreq_server {
+    domid_t domid;  /* IN - domain to be serviced */
+    ioservid_t id;  /* OUT - server id */
+};
+typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
+
+#define HVMOP_get_ioreq_server_info 18
+struct xen_hvm_get_ioreq_server_info {
+    domid_t domid;               /* IN - domain to be serviced */
+    ioservid_t id;               /* IN - server id */
+    xen_pfn_t ioreq_pfn;         /* OUT - sync ioreq pfn */
+    xen_pfn_t bufioreq_pfn;      /* OUT - buffered ioreq pfn */
+    evtchn_port_t bufioreq_port; /* OUT - buffered ioreq port */
+};
+typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
+
+#define HVMOP_map_io_range_to_ioreq_server 19
+struct xen_hvm_map_io_range_to_ioreq_server {
+    domid_t domid;                  /* IN - domain to be serviced */
+    ioservid_t id;                  /* IN - handle from HVMOP_register_ioreq_server */
+    int is_mmio;                    /* IN - MMIO or port IO? */
+    uint64_aligned_t start, end;    /* IN - inclusive start and end of range */
+};
+typedef struct xen_hvm_map_io_range_to_ioreq_server xen_hvm_map_io_range_to_ioreq_server_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_map_io_range_to_ioreq_server_t);
+
+#define HVMOP_unmap_io_range_from_ioreq_server 20
+struct xen_hvm_unmap_io_range_from_ioreq_server {
+    domid_t domid;          /* IN - domain to be serviced */
+    ioservid_t id;          /* IN - handle from HVMOP_register_ioreq_server */
+    uint8_t is_mmio;        /* IN - MMIO or port IO? */
+    uint64_aligned_t start; /* IN - start address of the range to remove */
+};
+typedef struct xen_hvm_unmap_io_range_from_ioreq_server xen_hvm_unmap_io_range_from_ioreq_server_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_unmap_io_range_from_ioreq_server_t);
+
+#define HVMOP_map_pcidev_to_ioreq_server 21
+struct xen_hvm_map_pcidev_to_ioreq_server {
+    domid_t domid;      /* IN - domain to be serviced */
+    ioservid_t id;      /* IN - handle from HVMOP_register_ioreq_server */
+    uint16_t bdf;       /* IN - PCI bus/dev/func */
+};
+typedef struct xen_hvm_map_pcidev_to_ioreq_server xen_hvm_map_pcidev_to_ioreq_server_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_map_pcidev_to_ioreq_server_t);
+
+#define HVMOP_unmap_pcidev_from_ioreq_server 22
+struct xen_hvm_unmap_pcidev_from_ioreq_server {
+    domid_t domid;      /* IN - domain to be serviced */
+    ioservid_t id;      /* IN - handle from HVMOP_register_ioreq_server */
+    uint16_t bdf;       /* IN - PCI bus/dev/func */
+};
+typedef struct xen_hvm_unmap_pcidev_from_ioreq_server xen_hvm_unmap_pcidev_from_ioreq_server_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_unmap_pcidev_from_ioreq_server_t);
+
+#define HVMOP_destroy_ioreq_server 23
+struct xen_hvm_destroy_ioreq_server {
+    domid_t domid;          /* IN - domain to be serviced */
+    ioservid_t id;          /* IN - server id */
+};
+typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
+
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
diff --git a/xen/include/public/hvm/ioreq.h b/xen/include/public/hvm/ioreq.h
index f05d130..e84fa75 100644
--- a/xen/include/public/hvm/ioreq.h
+++ b/xen/include/public/hvm/ioreq.h
@@ -34,6 +34,7 @@
 
 #define IOREQ_TYPE_PIO          0 /* pio */
 #define IOREQ_TYPE_COPY         1 /* mmio ops */
+#define IOREQ_TYPE_PCI_CONFIG   2 /* pci config ops */
 #define IOREQ_TYPE_TIMEOFFSET   7
 #define IOREQ_TYPE_INVALIDATE   8 /* mapcache */
 
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 517a184..f830bdd 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -145,6 +145,9 @@
 /* SHUTDOWN_* action in case of a triple fault */
 #define HVM_PARAM_TRIPLE_FAULT_REASON 31
 
-#define HVM_NR_PARAMS          32
+#define HVM_PARAM_IOREQ_SERVER_PFN 32
+#define HVM_PARAM_NR_IOREQ_SERVER_PAGES 33
+
+#define HVM_NR_PARAMS          34
 
 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
