[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] Revert "x86/hvm: remove default ioreq server"
commit 36e29dd9e580cb0f847f5ac1e72afdb5febe3e99
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Aug 27 15:50:50 2018 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Aug 27 15:50:50 2018 +0200
Revert "x86/hvm: remove default ioreq server"
This reverts commit 629856eae2a7f766f1f024a06ad3abf1fd4b9d37,
which breaks at least one of the qemu builds.
---
xen/arch/x86/hvm/dm.c | 2 +-
xen/arch/x86/hvm/hvm.c | 38 +++++++++---
xen/arch/x86/hvm/ioreq.c | 123 ++++++++++++++++++++++++++++++++++-----
xen/include/asm-x86/hvm/domain.h | 1 +
xen/include/asm-x86/hvm/ioreq.h | 4 +-
xen/include/public/hvm/params.h | 9 ++-
6 files changed, 144 insertions(+), 33 deletions(-)
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 6ace3dc7c1..6755f3fd96 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -411,7 +411,7 @@ static int dm_op(const struct dmop_args *op_args)
if ( data->pad[0] || data->pad[1] || data->pad[2] )
break;
- rc = hvm_create_ioreq_server(d, data->handle_bufioreq,
+ rc = hvm_create_ioreq_server(d, false, data->handle_bufioreq,
&data->id);
break;
}
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index cbfd6fca88..72c51faecb 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4093,16 +4093,12 @@ static int hvm_allow_set_param(struct domain *d,
case HVM_PARAM_CONSOLE_EVTCHN:
case HVM_PARAM_X87_FIP_WIDTH:
break;
- /* The following parameters are deprecated. */
- case HVM_PARAM_DM_DOMAIN:
- case HVM_PARAM_BUFIOREQ_EVTCHN:
- rc = -EPERM;
- break;
/*
* The following parameters must not be set by the guest
* since the domain may need to be paused.
*/
case HVM_PARAM_IDENT_PT:
+ case HVM_PARAM_DM_DOMAIN:
case HVM_PARAM_ACPI_S_STATE:
/* The remaining parameters should not be set by the guest. */
default:
@@ -4267,6 +4263,9 @@ static int hvmop_set_param(
d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
rc = -EINVAL;
break;
+ case HVM_PARAM_BUFIOREQ_EVTCHN:
+ rc = -EINVAL;
+ break;
case HVM_PARAM_TRIPLE_FAULT_REASON:
if ( a.value > SHUTDOWN_MAX )
rc = -EINVAL;
@@ -4374,11 +4373,13 @@ static int hvm_allow_get_param(struct domain *d,
case HVM_PARAM_ALTP2M:
case HVM_PARAM_X87_FIP_WIDTH:
break;
- /* The following parameters are deprecated. */
- case HVM_PARAM_DM_DOMAIN:
+ /*
+ * The following parameters must not be read by the guest
+ * since the domain may need to be paused.
+ */
+ case HVM_PARAM_IOREQ_PFN:
+ case HVM_PARAM_BUFIOREQ_PFN:
case HVM_PARAM_BUFIOREQ_EVTCHN:
- rc = -ENODATA;
- break;
/* The remaining parameters should not be read by the guest. */
default:
if ( d == current->domain )
@@ -4432,6 +4433,25 @@ static int hvmop_get_param(
case HVM_PARAM_X87_FIP_WIDTH:
a.value = d->arch.x87_fip_width;
break;
+ case HVM_PARAM_IOREQ_PFN:
+ case HVM_PARAM_BUFIOREQ_PFN:
+ case HVM_PARAM_BUFIOREQ_EVTCHN:
+ /*
+ * It may be necessary to create a default ioreq server here,
+ * because legacy versions of QEMU are not aware of the new API for
+ * explicit ioreq server creation. However, if the domain is not
+ * under construction then it will not be QEMU querying the
+ * parameters and thus the query should not have that side-effect.
+ */
+ if ( !d->creation_finished )
+ {
+ rc = hvm_create_ioreq_server(d, true,
+ HVM_IOREQSRV_BUFIOREQ_LEGACY, NULL);
+ if ( rc != 0 && rc != -EEXIST )
+ goto out;
+ }
+
+ /*FALLTHRU*/
default:
a.value = d->arch.hvm_domain.params[a.index];
break;
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index eac7ecc4b6..940a2c9728 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -55,6 +55,9 @@ static struct hvm_ioreq_server *get_ioreq_server(const struct
domain *d,
return GET_IOREQ_SERVER(d, id);
}
+#define IS_DEFAULT(s) \
+ ((s) && (s) == GET_IOREQ_SERVER((s)->target, DEFAULT_IOSERVID))
+
/*
* Iterate over all possible ioreq servers.
*
@@ -242,6 +245,8 @@ static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
struct domain *d = s->target;
unsigned int i;
+ ASSERT(!IS_DEFAULT(s));
+
for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
{
if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
@@ -256,6 +261,7 @@ static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s,
gfn_t gfn)
struct domain *d = s->target;
unsigned int i = gfn_x(gfn) - d->arch.hvm_domain.ioreq_gfn.base;
+ ASSERT(!IS_DEFAULT(s));
ASSERT(!gfn_eq(gfn, INVALID_GFN));
set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
@@ -271,7 +277,9 @@ static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s,
bool buf)
destroy_ring_for_helper(&iorp->va, iorp->page);
iorp->page = NULL;
- hvm_free_ioreq_gfn(s, iorp->gfn);
+ if ( !IS_DEFAULT(s) )
+ hvm_free_ioreq_gfn(s, iorp->gfn);
+
iorp->gfn = INVALID_GFN;
}
@@ -297,7 +305,13 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s,
bool buf)
if ( d->is_dying )
return -EINVAL;
- iorp->gfn = hvm_alloc_ioreq_gfn(s);
+ if ( IS_DEFAULT(s) )
+ iorp->gfn = _gfn(buf ?
+ d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
+ d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN]);
+ else
+ iorp->gfn = hvm_alloc_ioreq_gfn(s);
+
if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return -ENOMEM;
@@ -402,7 +416,7 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server
*s, bool buf)
struct domain *d = s->target;
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
- if ( gfn_eq(iorp->gfn, INVALID_GFN) )
+ if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
return;
if ( guest_physmap_remove_page(d, iorp->gfn,
@@ -417,7 +431,7 @@ static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s,
bool buf)
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
int rc;
- if ( gfn_eq(iorp->gfn, INVALID_GFN) )
+ if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
return 0;
clear_page(iorp->va);
@@ -469,12 +483,17 @@ static int hvm_ioreq_server_add_vcpu(struct
hvm_ioreq_server *s,
if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
{
+ struct domain *d = s->target;
+
rc = alloc_unbound_xen_event_channel(v->domain, 0,
s->emulator->domain_id, NULL);
if ( rc < 0 )
goto fail3;
s->bufioreq_evtchn = rc;
+ if ( IS_DEFAULT(s) )
+ d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
+ s->bufioreq_evtchn;
}
sv->vcpu = v;
@@ -598,6 +617,9 @@ static void hvm_ioreq_server_free_rangesets(struct
hvm_ioreq_server *s)
{
unsigned int i;
+ if ( IS_DEFAULT(s) )
+ return;
+
for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
rangeset_destroy(s->range[i]);
}
@@ -608,6 +630,11 @@ static int hvm_ioreq_server_alloc_rangesets(struct
hvm_ioreq_server *s,
unsigned int i;
int rc;
+ if ( id == DEFAULT_IOSERVID )
+ goto done;
+
+ ASSERT(!IS_DEFAULT(s));
+
for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
{
char *name;
@@ -632,6 +659,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct
hvm_ioreq_server *s,
rangeset_limit(s->range[i], MAX_NR_IO_RANGES);
}
+ done:
return 0;
fail:
@@ -705,6 +733,13 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server
*s,
s->bufioreq_handling = bufioreq_handling;
+ if ( id == DEFAULT_IOSERVID )
+ {
+ rc = hvm_ioreq_server_map_pages(s);
+ if ( rc )
+ goto fail_map;
+ }
+
for_each_vcpu ( d, v )
{
rc = hvm_ioreq_server_add_vcpu(s, v);
@@ -718,6 +753,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
hvm_ioreq_server_remove_all_vcpus(s);
hvm_ioreq_server_unmap_pages(s);
+ fail_map:
hvm_ioreq_server_free_rangesets(s);
put_domain(s->emulator);
@@ -746,8 +782,8 @@ static void hvm_ioreq_server_deinit(struct hvm_ioreq_server
*s)
put_domain(s->emulator);
}
-int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
- ioservid_t *id)
+int hvm_create_ioreq_server(struct domain *d, bool is_default,
+ int bufioreq_handling, ioservid_t *id)
{
struct hvm_ioreq_server *s;
unsigned int i;
@@ -763,19 +799,32 @@ int hvm_create_ioreq_server(struct domain *d, int
bufioreq_handling,
domain_pause(d);
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
- for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
+ if ( is_default )
{
- if ( !GET_IOREQ_SERVER(d, i) )
- break;
+ i = DEFAULT_IOSERVID;
+
+ rc = -EEXIST;
+ if ( GET_IOREQ_SERVER(d, i) )
+ goto fail;
}
+ else
+ {
+ for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
+ {
+ if ( i != DEFAULT_IOSERVID && !GET_IOREQ_SERVER(d, i) )
+ break;
+ }
- rc = -ENOSPC;
- if ( i >= MAX_NR_IOREQ_SERVERS )
- goto fail;
+ rc = -ENOSPC;
+ if ( i >= MAX_NR_IOREQ_SERVERS )
+ goto fail;
+ }
/*
* It is safe to call set_ioreq_server() prior to
* hvm_ioreq_server_init() since the target domain is paused.
+ * It is necessary for the calls to be ordered thus otherwise
+ * the IS_DEFAULT() macro would not evaluate correctly.
*/
set_ioreq_server(d, i, s);
@@ -786,6 +835,9 @@ int hvm_create_ioreq_server(struct domain *d, int
bufioreq_handling,
goto fail;
}
+ if ( i == DEFAULT_IOSERVID )
+ hvm_ioreq_server_enable(s);
+
if ( id )
*id = i;
@@ -807,6 +859,9 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t
id)
struct hvm_ioreq_server *s;
int rc;
+ if ( id == DEFAULT_IOSERVID )
+ return -EPERM;
+
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -815,6 +870,8 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t
id)
if ( !s )
goto out;
+ ASSERT(!IS_DEFAULT(s));
+
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
@@ -827,7 +884,9 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t
id)
/*
* It is safe to call hvm_ioreq_server_deinit() prior to
- * set_ioreq_server() since the target domain is paused.
+ * set_ioreq_server() since the target domain is paused. It is
+ * necessary for the calls to be ordered thus otherwise the
+ * IS_DEFAULT() macro would not evaluate correctly.
*/
hvm_ioreq_server_deinit(s);
set_ioreq_server(d, id, NULL);
@@ -852,6 +911,9 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t
id,
struct hvm_ioreq_server *s;
int rc;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -860,6 +922,8 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t
id,
if ( !s )
goto out;
+ ASSERT(!IS_DEFAULT(s));
+
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
@@ -897,6 +961,9 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t
id,
struct hvm_ioreq_server *s;
int rc;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
if ( !is_hvm_domain(d) )
return -EINVAL;
@@ -908,6 +975,8 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t
id,
if ( !s )
goto out;
+ ASSERT(!IS_DEFAULT(s));
+
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
@@ -954,6 +1023,9 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d,
ioservid_t id,
if ( start > end )
return -EINVAL;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -962,6 +1034,8 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d,
ioservid_t id,
if ( !s )
goto out;
+ ASSERT(!IS_DEFAULT(s));
+
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
@@ -1006,6 +1080,9 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain
*d, ioservid_t id,
if ( start > end )
return -EINVAL;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -1014,6 +1091,8 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain
*d, ioservid_t id,
if ( !s )
goto out;
+ ASSERT(!IS_DEFAULT(s));
+
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
@@ -1061,6 +1140,9 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d,
ioservid_t id,
struct hvm_ioreq_server *s;
int rc;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
if ( type != HVMMEM_ioreq_server )
return -EINVAL;
@@ -1075,6 +1157,8 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d,
ioservid_t id,
if ( !s )
goto out;
+ ASSERT(!IS_DEFAULT(s));
+
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
@@ -1101,6 +1185,9 @@ int hvm_set_ioreq_server_state(struct domain *d,
ioservid_t id,
struct hvm_ioreq_server *s;
int rc;
+ if ( id == DEFAULT_IOSERVID )
+ return -EOPNOTSUPP;
+
spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
s = get_ioreq_server(d, id);
@@ -1109,6 +1196,8 @@ int hvm_set_ioreq_server_state(struct domain *d,
ioservid_t id,
if ( !s )
goto out;
+ ASSERT(!IS_DEFAULT(s));
+
rc = -EPERM;
if ( s->emulator != current->domain )
goto out;
@@ -1193,6 +1282,8 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
/*
* It is safe to call hvm_ioreq_server_deinit() prior to
* set_ioreq_server() since the target domain is being destroyed.
+ * It is necessary for the calls to be ordered thus otherwise the
+ * IS_DEFAULT() macro would not evaluate correctly.
*/
hvm_ioreq_server_deinit(s);
set_ioreq_server(d, id, NULL);
@@ -1213,7 +1304,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct
domain *d,
unsigned int id;
if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
- return NULL;
+ return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
cf8 = d->arch.hvm_domain.pci_cf8;
@@ -1255,7 +1346,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct
domain *d,
{
struct rangeset *r;
- if ( !s->enabled )
+ if ( IS_DEFAULT(s) || !s->enabled )
continue;
r = s->range[type];
@@ -1293,7 +1384,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct
domain *d,
}
}
- return NULL;
+ return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
}
static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 2398858442..588595059d 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -97,6 +97,7 @@ struct hvm_pi_ops {
};
#define MAX_NR_IOREQ_SERVERS 8
+#define DEFAULT_IOSERVID 0
struct hvm_domain {
/* Guest page range used for non-default ioreq servers */
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index e2588e912f..bab473cf44 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -23,8 +23,8 @@ bool hvm_io_pending(struct vcpu *v);
bool handle_hvm_io_completion(struct vcpu *v);
bool is_ioreq_server_page(struct domain *d, const struct page_info *page);
-int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
- ioservid_t *id);
+int hvm_create_ioreq_server(struct domain *d, bool is_default,
+ int bufioreq_handling, ioservid_t *id);
int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id);
int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
unsigned long *ioreq_gfn,
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index c947274737..396977c2bb 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -81,13 +81,9 @@
#define HVM_PARAM_PAE_ENABLED 4
#define HVM_PARAM_IOREQ_PFN 5
-#define HVM_PARAM_BUFIOREQ_PFN 6
-#ifdef __XEN__
-/* These parameters are deprecated and their meaning is undefined. */
-#define HVM_PARAM_DM_DOMAIN 13
+#define HVM_PARAM_BUFIOREQ_PFN 6
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
-#endif
#if defined(__i386__) || defined(__x86_64__)
@@ -185,6 +181,9 @@
/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
#define HVM_PARAM_IDENT_PT 12
+/* Device Model domain, defaults to 0. */
+#define HVM_PARAM_DM_DOMAIN 13
+
/* ACPI S state: currently support S0 and S3 on x86. */
#define HVM_PARAM_ACPI_S_STATE 14
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.