|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v4 7/8] ioreq-server: make buffered ioreq handling optional
Some emulators will only register regions that require non-buffered
access. (In practice the only region that a guest uses buffered access
for today is the VGA aperture from 0xa0000-0xbffff). This patch therefore
makes allocation of the buffered ioreq page and event channel optional for
secondary ioreq servers.
If a guest attempts buffered access to an ioreq server that does not
support it, the access will be handled via the normal synchronous path.
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
---
tools/libxc/xc_domain.c | 2 ++
tools/libxc/xenctrl.h | 1 +
xen/arch/x86/hvm/hvm.c | 74 +++++++++++++++++++++++++++------------
xen/include/public/hvm/hvm_op.h | 5 +--
4 files changed, 58 insertions(+), 24 deletions(-)
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 67829c5..6eacce6 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1286,6 +1286,7 @@ int xc_get_hvm_param(xc_interface *handle, domid_t dom,
int param, unsigned long
int xc_hvm_create_ioreq_server(xc_interface *xch,
domid_t domid,
+ int handle_bufioreq,
ioservid_t *id)
{
DECLARE_HYPERCALL;
@@ -1300,6 +1301,7 @@ int xc_hvm_create_ioreq_server(xc_interface *xch,
hypercall.arg[0] = HVMOP_create_ioreq_server;
hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
arg->domid = domid;
+ arg->handle_bufioreq = handle_bufioreq;
rc = do_xen_hypercall(xch, &hypercall);
*id = arg->id;
xc_hypercall_buffer_free(xch, arg);
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 1f8d490..cc0dab9 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1807,6 +1807,7 @@ int xc_get_hvm_param(xc_interface *handle, domid_t dom,
int param, unsigned long
int xc_hvm_create_ioreq_server(xc_interface *xch,
domid_t domid,
+ int handle_bufioreq,
ioservid_t *id);
int xc_hvm_get_ioreq_server_info(xc_interface *xch,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ba9b304..6a117e8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -709,7 +709,7 @@ static int hvm_ioreq_server_add_vcpu(struct
hvm_ioreq_server *s,
sv->ioreq_evtchn = rc;
- if ( v->vcpu_id == 0 )
+ if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
{
struct domain *d = s->domain;
@@ -761,7 +761,7 @@ static void hvm_ioreq_server_remove_vcpu(struct
hvm_ioreq_server *s,
list_del_init(&sv->list_entry);
- if ( v->vcpu_id == 0 )
+ if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
free_xen_event_channel(v, s->bufioreq_evtchn);
free_xen_event_channel(v, sv->ioreq_evtchn);
@@ -788,7 +788,7 @@ static void hvm_ioreq_server_remove_all_vcpus(struct
hvm_ioreq_server *s)
list_del_init(&sv->list_entry);
- if ( v->vcpu_id == 0 )
+ if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
free_xen_event_channel(v, s->bufioreq_evtchn);
free_xen_event_channel(v, sv->ioreq_evtchn);
@@ -800,7 +800,7 @@ static void hvm_ioreq_server_remove_all_vcpus(struct
hvm_ioreq_server *s)
}
static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
- bool_t is_default)
+ bool_t is_default, bool_t
handle_bufioreq)
{
struct domain *d = s->domain;
unsigned long ioreq_pfn, bufioreq_pfn;
@@ -808,24 +808,34 @@ static int hvm_ioreq_server_map_pages(struct
hvm_ioreq_server *s,
if ( is_default ) {
ioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+
+ /*
+ * The default ioreq server must handle buffered ioreqs, for
+ * backwards compatibility.
+ */
+ ASSERT(handle_bufioreq);
bufioreq_pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
} else {
rc = hvm_alloc_ioreq_gmfn(d, &ioreq_pfn);
if ( rc )
goto fail1;
- rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
- if ( rc )
- goto fail2;
+ if ( handle_bufioreq ) {
+ rc = hvm_alloc_ioreq_gmfn(d, &bufioreq_pfn);
+ if ( rc )
+ goto fail2;
+ }
}
rc = hvm_map_ioreq_page(d, &s->ioreq, ioreq_pfn);
if ( rc )
goto fail3;
- rc = hvm_map_ioreq_page(d, &s->bufioreq, bufioreq_pfn);
- if ( rc )
- goto fail4;
+ if ( handle_bufioreq ) {
+ rc = hvm_map_ioreq_page(d, &s->bufioreq, bufioreq_pfn);
+ if ( rc )
+ goto fail4;
+ }
return 0;
@@ -833,7 +843,7 @@ fail4:
hvm_unmap_ioreq_page(&s->ioreq);
fail3:
- if ( !is_default )
+ if ( !is_default && handle_bufioreq )
hvm_free_ioreq_gmfn(d, bufioreq_pfn);
fail2:
@@ -848,12 +858,17 @@ static void hvm_ioreq_server_unmap_pages(struct
hvm_ioreq_server *s,
bool_t is_default)
{
struct domain *d = s->domain;
+ bool_t handle_bufioreq = ( s->bufioreq.va != NULL );
+
+ if ( handle_bufioreq )
+ hvm_unmap_ioreq_page(&s->bufioreq);
- hvm_unmap_ioreq_page(&s->bufioreq);
hvm_unmap_ioreq_page(&s->ioreq);
if ( !is_default ) {
- hvm_free_ioreq_gmfn(d, s->bufioreq.gmfn);
+ if ( handle_bufioreq )
+ hvm_free_ioreq_gmfn(d, s->bufioreq.gmfn);
+
hvm_free_ioreq_gmfn(d, s->ioreq.gmfn);
}
}
@@ -880,7 +895,7 @@ static void hvm_ioreq_server_disable(struct
hvm_ioreq_server *s)
static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
domid_t domid, bool_t is_default,
- ioservid_t id)
+ bool_t handle_bufioreq, ioservid_t id)
{
struct vcpu *v;
int rc;
@@ -896,7 +911,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server
*s, struct domain *d,
INIT_LIST_HEAD(&s->ioreq_vcpu_list);
spin_lock_init(&s->bufioreq_lock);
- rc = hvm_ioreq_server_map_pages(s, is_default);
+ rc = hvm_ioreq_server_map_pages(s, is_default, handle_bufioreq);
if ( rc )
return rc;
@@ -960,7 +975,8 @@ static void hvm_ioreq_server_deinit(struct hvm_ioreq_server
*s,
}
static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
- bool_t is_default, ioservid_t *id)
+ bool_t is_default, bool_t handle_bufioreq,
+ ioservid_t *id)
{
struct hvm_ioreq_server *s;
int rc;
@@ -978,7 +994,7 @@ static int hvm_create_ioreq_server(struct domain *d,
domid_t domid,
domain_pause(d);
- rc = hvm_ioreq_server_init(s, d, domid, is_default,
+ rc = hvm_ioreq_server_init(s, d, domid, is_default, handle_bufioreq,
d->arch.hvm_domain.ioreq_server_id++);
if ( rc )
goto fail3;
@@ -1070,8 +1086,11 @@ static int hvm_get_ioreq_server_info(struct domain *d,
ioservid_t id,
continue;
*ioreq_pfn = s->ioreq.gmfn;
- *bufioreq_pfn = s->bufioreq.gmfn;
- *bufioreq_port = s->bufioreq_evtchn;
+
+ if ( s->bufioreq.va != NULL ) {
+ *bufioreq_pfn = s->bufioreq.gmfn;
+ *bufioreq_port = s->bufioreq_evtchn;
+ }
rc = 0;
break;
@@ -1425,6 +1444,13 @@ static int hvm_set_ioreq_pfn(struct domain *d, bool_t
buf,
spin_lock(&s->lock);
iorp = buf ? &s->bufioreq : &s->ioreq;
+
+ /*
+ * There must already be a mapped page, set up when the
+ * ioreq server was created.
+ */
+ hvm_unmap_ioreq_page(iorp);
+
rc = hvm_map_ioreq_page(d, iorp, pfn);
if ( rc )
goto fail;
@@ -2493,6 +2519,9 @@ int hvm_buffered_io_send(struct domain *d, ioreq_t *p)
iorp = &s->bufioreq;
pg = iorp->va;
+ if ( !pg )
+ return 0;
+
/*
* Return 0 for the cases we can't deal with:
* - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
@@ -5262,7 +5291,8 @@ static int hvmop_create_ioreq_server(
if ( !is_hvm_domain(d) )
goto out;
- rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0, &op.id);
+ rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0, op.handle_bufioreq,
+ &op.id);
if ( rc != 0 )
goto out;
@@ -5599,7 +5629,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
if ( a.value == DOMID_SELF )
a.value = curr_d->domain_id;
- rc = hvm_create_ioreq_server(d, a.value, 1, NULL);
+ rc = hvm_create_ioreq_server(d, a.value, 1, 1, NULL);
if ( rc == -EEXIST )
rc = hvm_set_dm_domain(d, a.value);
break;
@@ -5731,7 +5761,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
/* May need to create server */
domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
- rc = hvm_create_ioreq_server(d, domid, 1, NULL);
+ rc = hvm_create_ioreq_server(d, domid, 1, 1, NULL);
if ( rc != 0 && rc != -EEXIST )
goto param_fail;
/*FALLTHRU*/
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index a39290e..0ecb492 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -277,8 +277,9 @@ DEFINE_XEN_GUEST_HANDLE(ioservid_t);
#define HVMOP_create_ioreq_server 17
struct xen_hvm_create_ioreq_server {
- domid_t domid; /* IN - domain to be serviced */
- ioservid_t id; /* OUT - server id */
+ domid_t domid; /* IN - domain to be serviced */
+ uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
+ ioservid_t id; /* OUT - server id */
};
typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |