[Xen-devel] [PATCH-for-4.9 v1 2/8] dm_op: convert HVMOP_*ioreq_server*
NOTE: The definitions of HVM_IOREQSRV_BUFIOREQ_*, HVMOP_IO_RANGE_* and
HVMOP_PCI_SBDF have to persist for new interface versions as
they are already in use by callers of the libxc interface.
Suggested-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
 tools/libxc/xc_domain.c          | 204 ++++++++++++++----------------------
 xen/arch/x86/hvm/dm.c            |  55 ++++++++++
 xen/arch/x86/hvm/hvm.c           | 221 +--------------------------------------
 xen/arch/x86/hvm/ioreq.c         |  42 ++++----
 xen/include/asm-x86/hvm/domain.h |   3 +-
 xen/include/public/hvm/dm_op.h   | 157 +++++++++++++++++++++++++++
 xen/include/public/hvm/hvm_op.h  |  40 ++++---
 xen/include/xsm/dummy.h          |   6 --
 xen/include/xsm/xsm.h            |   6 --
 xen/xsm/dummy.c                  |   1 -
 xen/xsm/flask/hooks.c            |   6 --
 11 files changed, 340 insertions(+), 401 deletions(-)
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 296b852..1cbe49d 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1417,24 +1417,22 @@ int xc_hvm_create_ioreq_server(xc_interface *xch,
int handle_bufioreq,
ioservid_t *id)
{
- DECLARE_HYPERCALL_BUFFER(xen_hvm_create_ioreq_server_t, arg);
+ struct xen_dm_op op;
+ struct xen_dm_op_create_ioreq_server *data;
int rc;
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- return -1;
+ op.op = DMOP_create_ioreq_server;
+ data = &op.u.create_ioreq_server;
- arg->domid = domid;
- arg->handle_bufioreq = handle_bufioreq;
+ data->handle_bufioreq = handle_bufioreq;
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_create_ioreq_server,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ rc = do_dm_op(xch, domid, 1, &op, sizeof(op));
+ if ( rc )
+ return rc;
- *id = arg->id;
+ *id = data->id;
- xc_hypercall_buffer_free(xch, arg);
- return rc;
+ return 0;
}
int xc_hvm_get_ioreq_server_info(xc_interface *xch,
@@ -1444,84 +1442,65 @@ int xc_hvm_get_ioreq_server_info(xc_interface *xch,
xen_pfn_t *bufioreq_pfn,
evtchn_port_t *bufioreq_port)
{
- DECLARE_HYPERCALL_BUFFER(xen_hvm_get_ioreq_server_info_t, arg);
+ struct xen_dm_op op;
+ struct xen_dm_op_get_ioreq_server_info *data;
int rc;
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- return -1;
+ op.op = DMOP_get_ioreq_server_info;
+ data = &op.u.get_ioreq_server_info;
- arg->domid = domid;
- arg->id = id;
+ data->id = id;
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_get_ioreq_server_info,
- HYPERCALL_BUFFER_AS_ARG(arg));
- if ( rc != 0 )
- goto done;
+ rc = do_dm_op(xch, domid, 1, &op, sizeof(op));
+ if ( rc )
+ return rc;
if ( ioreq_pfn )
- *ioreq_pfn = arg->ioreq_pfn;
+ *ioreq_pfn = data->ioreq_pfn;
if ( bufioreq_pfn )
- *bufioreq_pfn = arg->bufioreq_pfn;
+ *bufioreq_pfn = data->bufioreq_pfn;
if ( bufioreq_port )
- *bufioreq_port = arg->bufioreq_port;
+ *bufioreq_port = data->bufioreq_port;
-done:
- xc_hypercall_buffer_free(xch, arg);
- return rc;
+ return 0;
}
int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch, domid_t domid,
ioservid_t id, int is_mmio,
uint64_t start, uint64_t end)
{
- DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
- int rc;
+ struct xen_dm_op op;
+ struct xen_dm_op_ioreq_server_range *data;
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- return -1;
-
- arg->domid = domid;
- arg->id = id;
- arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
- arg->start = start;
- arg->end = end;
+ op.op = DMOP_map_io_range_to_ioreq_server;
+ data = &op.u.map_io_range_to_ioreq_server;
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_map_io_range_to_ioreq_server,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ data->id = id;
+ data->type = is_mmio ? DMOP_IO_RANGE_MEMORY : DMOP_IO_RANGE_PORT;
+ data->start = start;
+ data->end = end;
- xc_hypercall_buffer_free(xch, arg);
- return rc;
+ return do_dm_op(xch, domid, 1, &op, sizeof(op));
}
int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
ioservid_t id, int is_mmio,
uint64_t start, uint64_t end)
{
- DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
- int rc;
+ struct xen_dm_op op;
+ struct xen_dm_op_ioreq_server_range *data;
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- return -1;
+ op.op = DMOP_unmap_io_range_from_ioreq_server;
+ data = &op.u.unmap_io_range_from_ioreq_server;
- arg->domid = domid;
- arg->id = id;
- arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
- arg->start = start;
- arg->end = end;
-
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_unmap_io_range_from_ioreq_server,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ data->id = id;
+ data->type = is_mmio ? DMOP_IO_RANGE_MEMORY : DMOP_IO_RANGE_PORT;
+ data->start = start;
+ data->end = end;
- xc_hypercall_buffer_free(xch, arg);
- return rc;
+ return do_dm_op(xch, domid, 1, &op, sizeof(op));
}
int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1529,37 +1508,30 @@ int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
uint8_t bus, uint8_t device,
uint8_t function)
{
- DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
- int rc;
+ struct xen_dm_op op;
+ struct xen_dm_op_ioreq_server_range *data;
if (device > 0x1f || function > 0x7) {
errno = EINVAL;
return -1;
}
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- return -1;
+ op.op = DMOP_map_io_range_to_ioreq_server;
+ data = &op.u.map_io_range_to_ioreq_server;
- arg->domid = domid;
- arg->id = id;
- arg->type = HVMOP_IO_RANGE_PCI;
+ data->id = id;
+ data->type = DMOP_IO_RANGE_PCI;
/*
* The underlying hypercall will deal with ranges of PCI SBDF
* but, for simplicity, the API only uses singletons.
*/
- arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
- (uint64_t)bus,
- (uint64_t)device,
- (uint64_t)function);
-
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_map_io_range_to_ioreq_server,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ data->start = data->end = DMOP_PCI_SBDF((uint64_t)segment,
+ (uint64_t)bus,
+ (uint64_t)device,
+ (uint64_t)function);
- xc_hypercall_buffer_free(xch, arg);
- return rc;
+ return do_dm_op(xch, domid, 1, &op, sizeof(op));
}
int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1567,54 +1539,45 @@ int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
uint8_t bus, uint8_t device,
uint8_t function)
{
- DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
- int rc;
+ struct xen_dm_op op;
+ struct xen_dm_op_ioreq_server_range *data;
if (device > 0x1f || function > 0x7) {
errno = EINVAL;
return -1;
}
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- return -1;
+ op.op = DMOP_unmap_io_range_from_ioreq_server;
+ data = &op.u.unmap_io_range_from_ioreq_server;
- arg->domid = domid;
- arg->id = id;
- arg->type = HVMOP_IO_RANGE_PCI;
- arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
- (uint64_t)bus,
- (uint64_t)device,
- (uint64_t)function);
-
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_unmap_io_range_from_ioreq_server,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ data->id = id;
+ data->type = DMOP_IO_RANGE_PCI;
- xc_hypercall_buffer_free(xch, arg);
- return rc;
+ /*
+ * The underlying hypercall will deal with ranges of PCI SBDF
+ * but, for simplicity, the API only uses singletons.
+ */
+ data->start = data->end = DMOP_PCI_SBDF((uint64_t)segment,
+ (uint64_t)bus,
+ (uint64_t)device,
+ (uint64_t)function);
+
+ return do_dm_op(xch, domid, 1, &op, sizeof(op));
}
int xc_hvm_destroy_ioreq_server(xc_interface *xch,
domid_t domid,
ioservid_t id)
{
- DECLARE_HYPERCALL_BUFFER(xen_hvm_destroy_ioreq_server_t, arg);
- int rc;
-
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- return -1;
+ struct xen_dm_op op;
+ struct xen_dm_op_destroy_ioreq_server *data;
- arg->domid = domid;
- arg->id = id;
+ op.op = DMOP_destroy_ioreq_server;
+ data = &op.u.destroy_ioreq_server;
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_destroy_ioreq_server,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ data->id = id;
- xc_hypercall_buffer_free(xch, arg);
- return rc;
+ return do_dm_op(xch, domid, 1, &op, sizeof(op));
}
int xc_hvm_set_ioreq_server_state(xc_interface *xch,
@@ -1622,23 +1585,16 @@ int xc_hvm_set_ioreq_server_state(xc_interface *xch,
ioservid_t id,
int enabled)
{
- DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
- int rc;
+ struct xen_dm_op op;
+ struct xen_dm_op_set_ioreq_server_state *data;
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- return -1;
+ op.op = DMOP_set_ioreq_server_state;
+ data = &op.u.set_ioreq_server_state;
- arg->domid = domid;
- arg->id = id;
- arg->enabled = !!enabled;
+ data->id = id;
+ data->enabled = !!enabled;
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_set_ioreq_server_state,
- HYPERCALL_BUFFER_AS_ARG(arg));
-
- xc_hypercall_buffer_free(xch, arg);
- return rc;
+ return do_dm_op(xch, domid, 1, &op, sizeof(op));
}
int xc_domain_setdebugging(xc_interface *xch,
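[ To make the conversion concrete, here is a minimal sketch of how a
secondary emulator might drive the converted libxc calls above. Error
handling is abbreviated and the 0x3f8-0x3ff port range is purely
illustrative; only the function signatures shown in this file are
assumed. ]

    #include <xenctrl.h>

    static int setup_ioreq_server(xc_interface *xch, domid_t domid)
    {
        ioservid_t id;
        xen_pfn_t ioreq_pfn, bufioreq_pfn;
        evtchn_port_t bufioreq_port;
        int rc;

        /* Create a server with an atomic buffered ioreq ring. */
        rc = xc_hvm_create_ioreq_server(xch, domid,
                                        HVM_IOREQSRV_BUFIOREQ_ATOMIC, &id);
        if ( rc )
            return rc;

        /* Fetch the gmfns and event channel needed to service requests. */
        rc = xc_hvm_get_ioreq_server_info(xch, domid, id, &ioreq_pfn,
                                          &bufioreq_pfn, &bufioreq_port);
        if ( rc )
            goto fail;

        /* Register an illustrative port I/O range, then enable. */
        rc = xc_hvm_map_io_range_to_ioreq_server(xch, domid, id,
                                                 0 /* !is_mmio: ports */,
                                                 0x3f8, 0x3ff);
        if ( rc )
            goto fail;

        rc = xc_hvm_set_ioreq_server_state(xch, domid, id, 1);
        if ( rc )
            goto fail;

        return 0;

     fail:
        xc_hvm_destroy_ioreq_server(xch, domid, id);
        return rc;
    }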
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index ba7b8f6..c718a76 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -102,6 +102,61 @@ long do_dm_op(domid_t domid,
switch ( op.op )
{
+ case DMOP_create_ioreq_server:
+ {
+ struct domain *curr_d = current->domain;
+ struct xen_dm_op_create_ioreq_server *data =
+ &op.u.create_ioreq_server;
+
+ rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
+ data->handle_bufioreq, &data->id);
+ break;
+ }
+ case DMOP_get_ioreq_server_info:
+ {
+ struct xen_dm_op_get_ioreq_server_info *data =
+ &op.u.get_ioreq_server_info;
+
+ rc = hvm_get_ioreq_server_info(d, data->id,
+ &data->ioreq_pfn,
+ &data->bufioreq_pfn,
+ &data->bufioreq_port);
+ break;
+ }
+ case DMOP_map_io_range_to_ioreq_server:
+ {
+ struct xen_dm_op_ioreq_server_range *data =
+ &op.u.map_io_range_to_ioreq_server;
+
+ rc = hvm_map_io_range_to_ioreq_server(d, data->id, data->type,
+ data->start, data->end);
+ break;
+ }
+ case DMOP_unmap_io_range_from_ioreq_server:
+ {
+ struct xen_dm_op_ioreq_server_range *data =
+ &op.u.unmap_io_range_from_ioreq_server;
+
+ rc = hvm_unmap_io_range_from_ioreq_server(d, data->id, data->type,
+ data->start, data->end);
+ break;
+ }
+ case DMOP_set_ioreq_server_state:
+ {
+ struct xen_dm_op_set_ioreq_server_state *data =
+ &op.u.set_ioreq_server_state;
+
+ rc = hvm_set_ioreq_server_state(d, data->id, !!data->enabled);
+ break;
+ }
+ case DMOP_destroy_ioreq_server:
+ {
+ struct xen_dm_op_destroy_ioreq_server *data =
+ &op.u.destroy_ioreq_server;
+
+ rc = hvm_destroy_ioreq_server(d, data->id);
+ break;
+ }
default:
rc = -EOPNOTSUPP;
break;
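[ For reference, the caller-side shape of what each case above consumes:
one struct xen_dm_op filled in and handed over in a single buffer. A
sketch only, assuming a server handle already obtained and the do_dm_op()
helper introduced earlier in this series: ]

    ioservid_t id = 0; /* handle from DMOP_create_ioreq_server */

    struct xen_dm_op op = {
        .op = DMOP_set_ioreq_server_state,
        .u.set_ioreq_server_state = {
            .id = id,
            .enabled = 1,
        },
    };

    /* Passed to Xen exactly as in xc_domain.c above; any OUT fields
       are copied back in the same buffer. */
    rc = do_dm_op(xch, domid, 1, &op, sizeof(op));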
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 25c32e6..b2a7772 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4789,195 +4789,6 @@ static int hvmop_flush_tlb_all(void)
return 0;
}
-static int hvmop_create_ioreq_server(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_create_ioreq_server_t) uop)
-{
- struct domain *curr_d = current->domain;
- xen_hvm_create_ioreq_server_t op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_create_ioreq_server);
- if ( rc != 0 )
- goto out;
-
- rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
- op.handle_bufioreq, &op.id);
- if ( rc != 0 )
- goto out;
-
- rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
-static int hvmop_get_ioreq_server_info(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_get_ioreq_server_info_t) uop)
-{
- xen_hvm_get_ioreq_server_info_t op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_get_ioreq_server_info);
- if ( rc != 0 )
- goto out;
-
- rc = hvm_get_ioreq_server_info(d, op.id,
- &op.ioreq_pfn,
- &op.bufioreq_pfn,
- &op.bufioreq_port);
- if ( rc != 0 )
- goto out;
-
- rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
-static int hvmop_map_io_range_to_ioreq_server(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
- xen_hvm_io_range_t op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_map_io_range_to_ioreq_server);
- if ( rc != 0 )
- goto out;
-
- rc = hvm_map_io_range_to_ioreq_server(d, op.id, op.type,
- op.start, op.end);
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
-static int hvmop_unmap_io_range_from_ioreq_server(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
- xen_hvm_io_range_t op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_unmap_io_range_from_ioreq_server);
- if ( rc != 0 )
- goto out;
-
- rc = hvm_unmap_io_range_from_ioreq_server(d, op.id, op.type,
- op.start, op.end);
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
-static int hvmop_set_ioreq_server_state(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
-{
- xen_hvm_set_ioreq_server_state_t op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_set_ioreq_server_state);
- if ( rc != 0 )
- goto out;
-
- rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
-static int hvmop_destroy_ioreq_server(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
-{
- xen_hvm_destroy_ioreq_server_t op;
- struct domain *d;
- int rc;
-
- if ( copy_from_guest(&op, uop, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(op.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_destroy_ioreq_server);
- if ( rc != 0 )
- goto out;
-
- rc = hvm_destroy_ioreq_server(d, op.id);
-
- out:
- rcu_unlock_domain(d);
- return rc;
-}
-
static int hvmop_set_evtchn_upcall_vector(
XEN_GUEST_HANDLE_PARAM(xen_hvm_evtchn_upcall_vector_t) uop)
{
@@ -5324,7 +5135,7 @@ static int hvmop_get_param(
/* May need to create server. */
domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
rc = hvm_create_ioreq_server(d, domid, 1,
- HVM_IOREQSRV_BUFIOREQ_LEGACY, NULL);
+ DMOP_BUFIOREQ_LEGACY, NULL);
if ( rc != 0 && rc != -EEXIST )
goto out;
}
@@ -5687,36 +5498,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
start_iter = op & ~mask;
switch ( op &= mask )
{
- case HVMOP_create_ioreq_server:
- rc = hvmop_create_ioreq_server(
- guest_handle_cast(arg, xen_hvm_create_ioreq_server_t));
- break;
-
- case HVMOP_get_ioreq_server_info:
- rc = hvmop_get_ioreq_server_info(
- guest_handle_cast(arg, xen_hvm_get_ioreq_server_info_t));
- break;
-
- case HVMOP_map_io_range_to_ioreq_server:
- rc = hvmop_map_io_range_to_ioreq_server(
- guest_handle_cast(arg, xen_hvm_io_range_t));
- break;
-
- case HVMOP_unmap_io_range_from_ioreq_server:
- rc = hvmop_unmap_io_range_from_ioreq_server(
- guest_handle_cast(arg, xen_hvm_io_range_t));
- break;
-
- case HVMOP_set_ioreq_server_state:
- rc = hvmop_set_ioreq_server_state(
- guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
- break;
-
- case HVMOP_destroy_ioreq_server:
- rc = hvmop_destroy_ioreq_server(
- guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
- break;
-
case HVMOP_set_evtchn_upcall_vector:
rc = hvmop_set_evtchn_upcall_vector(
guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index d2245e2..c11e7a0 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -513,9 +513,9 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
char *name;
rc = asprintf(&name, "ioreq_server %d %s", s->id,
- (i == HVMOP_IO_RANGE_PORT) ? "port" :
- (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
- (i == HVMOP_IO_RANGE_PCI) ? "pci" :
+ (i == DMOP_IO_RANGE_PORT) ? "port" :
+ (i == DMOP_IO_RANGE_MEMORY) ? "memory" :
+ (i == DMOP_IO_RANGE_PCI) ? "pci" :
"");
if ( rc )
goto fail;
@@ -617,11 +617,11 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
if ( rc )
return rc;
- if ( bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC )
+ if ( bufioreq_handling == DMOP_BUFIOREQ_ATOMIC )
s->bufioreq_atomic = 1;
rc = hvm_ioreq_server_setup_pages(
- s, is_default, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
+ s, is_default, bufioreq_handling != DMOP_BUFIOREQ_OFF);
if ( rc )
goto fail_map;
@@ -686,7 +686,7 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
struct hvm_ioreq_server *s;
int rc;
- if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
+ if ( bufioreq_handling > DMOP_BUFIOREQ_ATOMIC )
return -EINVAL;
rc = -ENOMEM;
@@ -833,9 +833,9 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
switch ( type )
{
- case HVMOP_IO_RANGE_PORT:
- case HVMOP_IO_RANGE_MEMORY:
- case HVMOP_IO_RANGE_PCI:
+ case DMOP_IO_RANGE_PORT:
+ case DMOP_IO_RANGE_MEMORY:
+ case DMOP_IO_RANGE_PCI:
r = s->range[type];
break;
@@ -885,9 +885,9 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
switch ( type )
{
- case HVMOP_IO_RANGE_PORT:
- case HVMOP_IO_RANGE_MEMORY:
- case HVMOP_IO_RANGE_PCI:
+ case DMOP_IO_RANGE_PORT:
+ case DMOP_IO_RANGE_MEMORY:
+ case DMOP_IO_RANGE_PCI:
r = s->range[type];
break;
@@ -1128,12 +1128,12 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
/* PCI config data cycle */
- sbdf = HVMOP_PCI_SBDF(0,
- PCI_BUS(CF8_BDF(cf8)),
- PCI_SLOT(CF8_BDF(cf8)),
- PCI_FUNC(CF8_BDF(cf8)));
+ sbdf = DMOP_PCI_SBDF(0,
+ PCI_BUS(CF8_BDF(cf8)),
+ PCI_SLOT(CF8_BDF(cf8)),
+ PCI_FUNC(CF8_BDF(cf8)));
- type = HVMOP_IO_RANGE_PCI;
+ type = DMOP_IO_RANGE_PCI;
addr = ((uint64_t)sbdf << 32) |
CF8_ADDR_LO(cf8) |
(p->addr & 3);
@@ -1152,7 +1152,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
else
{
type = (p->type == IOREQ_TYPE_PIO) ?
- HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
+ DMOP_IO_RANGE_PORT : DMOP_IO_RANGE_MEMORY;
addr = p->addr;
}
@@ -1174,19 +1174,19 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
{
unsigned long end;
- case HVMOP_IO_RANGE_PORT:
+ case DMOP_IO_RANGE_PORT:
end = addr + p->size - 1;
if ( rangeset_contains_range(r, addr, end) )
return s;
break;
- case HVMOP_IO_RANGE_MEMORY:
+ case DMOP_IO_RANGE_MEMORY:
end = addr + (p->size * p->count) - 1;
if ( rangeset_contains_range(r, addr, end) )
return s;
break;
- case HVMOP_IO_RANGE_PCI:
+ case DMOP_IO_RANGE_PCI:
if ( rangeset_contains_singleton(r, addr >> 32) )
{
p->type = IOREQ_TYPE_PCI_CONFIG;
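[ For orientation (derived from the hunks above, not new behaviour):
hvm_select_ioreq_server() packs the DMOP_PCI_SBDF encoding into the upper
half of the 64-bit addr it compares against each server's rangesets,
which is why the DMOP_IO_RANGE_PCI case tests only addr >> 32: ]

    /*
     *  63                32 31                  0
     * +--------------------+--------------------+
     * | DMOP_PCI_SBDF(...) | config-space addr  |
     * +--------------------+--------------------+
     */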
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index f34d784..894c01d 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -33,6 +33,7 @@
#include <public/hvm/params.h>
#include <public/hvm/save.h>
#include <public/hvm/hvm_op.h>
+#include <public/hvm/dm_op.h>
struct hvm_ioreq_page {
unsigned long gmfn;
@@ -47,7 +48,7 @@ struct hvm_ioreq_vcpu {
bool_t pending;
};
-#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
+#define NR_IO_RANGE_TYPES (DMOP_IO_RANGE_PCI + 1)
#define MAX_NR_IO_RANGES 256
struct hvm_ioreq_server {
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index 3eb37d6..dc1d2ad 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -27,11 +27,168 @@
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#include "../xen.h"
+#include "../event_channel.h"
#define DMOP_invalid 0
+/*
+ * IOREQ Servers
+ *
+ * The interface between an I/O emulator and Xen is called an IOREQ Server.
+ * A domain supports a single 'legacy' IOREQ Server which is instantiated if
+ * parameter...
+ *
+ * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
+ * ioreq structures), or...
+ * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
+ * ioreq ring), or...
+ * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
+ * to request buffered I/O emulation).
+ *
+ * The following hypercalls facilitate the creation of IOREQ Servers for
+ * 'secondary' emulators which are invoked to implement port I/O, memory, or
+ * PCI config space ranges which they explicitly register.
+ */
+
+typedef uint16_t ioservid_t;
+
+/*
+ * DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
+ * emulator servicing domain <domid>.
+ *
+ * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
+ * the buffered ioreq ring will not be allocated and hence all emulation
+ * requests to this server will be synchronous.
+ */
+#define DMOP_create_ioreq_server 1
+
+struct xen_dm_op_create_ioreq_server {
+ /* IN - should server handle buffered ioreqs */
+ uint8_t handle_bufioreq;
+#define DMOP_BUFIOREQ_OFF 0
+#define DMOP_BUFIOREQ_LEGACY 1
+/*
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
+ */
+#define DMOP_BUFIOREQ_ATOMIC 2
+ uint8_t __pad[3];
+ /* OUT - server id */
+ ioservid_t id;
+};
+
+/*
+ * DMOP_get_ioreq_server_info: Get all the information necessary to access
+ * IOREQ Server <id>.
+ *
+ * The emulator needs to map the synchronous ioreq structures and buffered
+ * ioreq ring (if it exists) that Xen uses to request emulation. These are
+ * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * respectively. In addition, if the IOREQ Server is handling buffered
+ * emulation requests, the emulator needs to bind to event channel
+ * <bufioreq_port> to listen for them. (The event channels used for
+ * synchronous emulation requests are specified in the per-CPU ioreq
+ * structures in <ioreq_pfn>).
+ * If the IOREQ Server is not handling buffered emulation requests then the
+ * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
+ */
+#define DMOP_get_ioreq_server_info 2
+
+struct xen_dm_op_get_ioreq_server_info {
+ /* IN - server id */
+ ioservid_t id;
+ uint16_t __pad;
+ /* OUT - buffered ioreq port */
+ evtchn_port_t bufioreq_port;
+ /* OUT - sync ioreq pfn */
+ uint64_aligned_t ioreq_pfn;
+ /* OUT - buffered ioreq pfn */
+ uint64_aligned_t bufioreq_pfn;
+};
+
+/*
+ * DMOP_map_io_range_to_ioreq_server: Register an I/O range of domain
+ * <domid> for emulation by the client
+ * of IOREQ Server <id>
+ * DMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
+ * for emulation by the client of
+ * IOREQ Server <id>
+ *
+ * There are three types of I/O that can be emulated: port I/O, memory
+ * accesses and PCI config space accesses. The <type> field denotes which
+ * type of range the <start> and <end> (inclusive) fields are specifying.
+ * PCI config space ranges are specified by segment/bus/device/function
+ * values which should be encoded using the DMOP_PCI_SBDF helper macro
+ * below.
+ *
+ * NOTE: unless an emulation request falls entirely within a range mapped
+ * by a secondary emulator, it will not be passed to that emulator.
+ */
+#define DMOP_map_io_range_to_ioreq_server 3
+#define DMOP_unmap_io_range_from_ioreq_server 4
+
+struct xen_dm_op_ioreq_server_range {
+ /* IN - server id */
+ ioservid_t id;
+ uint16_t __pad;
+ /* IN - type of range */
+ uint32_t type;
+# define DMOP_IO_RANGE_PORT 0 /* I/O port range */
+# define DMOP_IO_RANGE_MEMORY 1 /* MMIO range */
+# define DMOP_IO_RANGE_PCI 2 /* PCI segment/bus/dev/func range */
+ /* IN - inclusive start and end of range */
+ uint64_aligned_t start, end;
+};
+
+#define DMOP_PCI_SBDF(s,b,d,f) \
+ ((((s) & 0xffff) << 16) | \
+ (((b) & 0xff) << 8) | \
+ (((d) & 0x1f) << 3) | \
+ ((f) & 0x07))
+
+/*
+ * DMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
+ * servicing domain <domid>.
+ *
+ * The IOREQ Server will not be passed any emulation requests until it is
+ * in the enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
+ * DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
+ * is in the enabled state.
+ */
+#define DMOP_set_ioreq_server_state 5
+
+struct xen_dm_op_set_ioreq_server_state {
+ /* IN - server id */
+ ioservid_t id;
+ uint16_t __pad;
+ /* IN - enabled? */
+ uint8_t enabled;
+};
+
+/*
+ * DMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
+ * <domid>.
+ *
+ * Any registered I/O ranges will be automatically deregistered.
+ */
+#define DMOP_destroy_ioreq_server 6
+
+struct xen_dm_op_destroy_ioreq_server {
+ /* IN - server id */
+ ioservid_t id;
+};
+
struct xen_dm_op {
uint32_t op;
+ union {
+ struct xen_dm_op_create_ioreq_server create_ioreq_server;
+ struct xen_dm_op_get_ioreq_server_info get_ioreq_server_info;
+ struct xen_dm_op_ioreq_server_range map_io_range_to_ioreq_server;
+ struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server;
+ struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
+ struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
+ } u;
};
struct xen_dm_op_buf {
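[ A worked example of the encoding, with illustrative values: registering
PCI device 0000:03:1f.7 as a singleton range would use

    uint64_t sbdf = DMOP_PCI_SBDF(0, 3, 0x1f, 7);
    /* == (0 << 16) | (3 << 8) | (0x1f << 3) | 7 == 0x3ff */

with start == end == sbdf and type == DMOP_IO_RANGE_PCI, matching the
singleton usage in xc_hvm_map_pcidev_to_ioreq_server() above. ]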
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index b3e45cf..cf5e59a 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -26,6 +26,7 @@
#include "../xen.h"
#include "../trace.h"
#include "../event_channel.h"
+#include "dm_op.h"
/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
#define HVMOP_set_param 0
@@ -242,6 +243,8 @@ struct xen_hvm_inject_msi {
typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
+#if __XEN_INTERFACE_VERSION__ < 0x00040900
+
/*
* IOREQ Servers
*
@@ -274,13 +277,6 @@ typedef uint16_t ioservid_t;
#define HVMOP_create_ioreq_server 17
struct xen_hvm_create_ioreq_server {
domid_t domid; /* IN - domain to be serviced */
-#define HVM_IOREQSRV_BUFIOREQ_OFF 0
-#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
-/*
- * Use this when read_pointer gets updated atomically and
- * the pointer pair gets read atomically:
- */
-#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
ioservid_t id; /* OUT - server id */
};
@@ -336,20 +332,11 @@ struct xen_hvm_io_range {
domid_t domid; /* IN - domain to be serviced */
ioservid_t id; /* IN - server id */
uint32_t type; /* IN - type of range */
-# define HVMOP_IO_RANGE_PORT 0 /* I/O port range */
-# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
-# define HVMOP_IO_RANGE_PCI 2 /* PCI segment/bus/dev/func range */
uint64_aligned_t start, end; /* IN - inclusive start and end of range */
};
typedef struct xen_hvm_io_range xen_hvm_io_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
-#define HVMOP_PCI_SBDF(s,b,d,f) \
- ((((s) & 0xffff) << 16) | \
- (((b) & 0xff) << 8) | \
- (((d) & 0x1f) << 3) | \
- ((f) & 0x07))
-
/*
* HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
* <domid>.
@@ -383,6 +370,27 @@ struct xen_hvm_set_ioreq_server_state {
typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
+
+/*
+ * Definitions relating to HVMOP/DMOP_create_ioreq_server.
+ */
+
+#define HVM_IOREQSRV_BUFIOREQ_OFF DMOP_BUFIOREQ_OFF
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY DMOP_BUFIOREQ_LEGACY
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC DMOP_BUFIOREQ_ATOMIC
+
+/*
+ * Definitions relating to HVMOP/DMOP_map_io_range_to_ioreq_server and
+ * HVMOP/DMOP_unmap_io_range_from_ioreq_server
+ */
+
+#define HVMOP_IO_RANGE_PORT DMOP_IO_RANGE_PORT
+#define HVMOP_IO_RANGE_MEMORY DMOP_IO_RANGE_MEMORY
+#define HVMOP_IO_RANGE_PCI DMOP_IO_RANGE_PCI
+
+#define HVMOP_PCI_SBDF DMOP_PCI_SBDF
+
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#if defined(__i386__) || defined(__x86_64__)
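[ The point of the aliases above is that existing callers of the libxc
interface compile unchanged; e.g. (illustrative):

    uint32_t type = HVMOP_IO_RANGE_PCI;            /* == DMOP_IO_RANGE_PCI */
    uint64_t sbdf = HVMOP_PCI_SBDF(0, 3, 0x1f, 7); /* == DMOP_PCI_SBDF(...) */
]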
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index 711318e..b7d3173 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -634,12 +634,6 @@ static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
return xsm_default_action(action, current->domain, d);
}
-static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
-{
- XSM_ASSERT_ACTION(XSM_DM_PRIV);
- return xsm_default_action(action, current->domain, d);
-}
-
static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d,
struct domain *cd, int op)
{
XSM_ASSERT_ACTION(XSM_DM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index c94c1a2..0bcde39 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -166,7 +166,6 @@ struct xsm_operations {
int (*hvm_set_isa_irq_level) (struct domain *d);
int (*hvm_set_pci_link_route) (struct domain *d);
int (*hvm_inject_msi) (struct domain *d);
- int (*hvm_ioreq_server) (struct domain *d, int op);
int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
int (*apic) (struct domain *d, int cmd);
int (*memtype) (uint32_t access);
@@ -656,11 +655,6 @@ static inline int xsm_hvm_inject_msi (xsm_default_t def, struct domain *d)
return xsm_ops->hvm_inject_msi(d);
}
-static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
-{
- return xsm_ops->hvm_ioreq_server(d, op);
-}
-
static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d,
struct domain *cd, int op)
{
return xsm_ops->mem_sharing_op(d, cd, op);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index a082b28..d544ec1 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -149,7 +149,6 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
set_to_dummy_if_null(ops, hvm_set_isa_irq_level);
set_to_dummy_if_null(ops, hvm_set_pci_link_route);
set_to_dummy_if_null(ops, hvm_inject_msi);
- set_to_dummy_if_null(ops, hvm_ioreq_server);
set_to_dummy_if_null(ops, mem_sharing_op);
set_to_dummy_if_null(ops, apic);
set_to_dummy_if_null(ops, machine_memory_map);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index d24bc01..d60c96d 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1522,11 +1522,6 @@ static int flask_hvm_inject_msi(struct domain *d)
return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
}
-static int flask_hvm_ioreq_server(struct domain *d, int op)
-{
- return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
-}
-
static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
{
int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
@@ -1805,7 +1800,6 @@ static struct xsm_operations flask_ops = {
.hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level,
.hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
.hvm_inject_msi = flask_hvm_inject_msi,
- .hvm_ioreq_server = flask_hvm_ioreq_server,
.mem_sharing_op = flask_mem_sharing_op,
.apic = flask_apic,
.machine_memory_map = flask_machine_memory_map,
--
2.1.4