[Xen-devel] [PATCH v2 2/2] xen: implement VCPUOP_register_runstate_phys_memory_area
From: Andrii Anisov <andrii_anisov@xxxxxxxx>
Implement VCPUOP_register_runstate_phys_memory_area by mapping the
guest-provided runstate area into Xen's address space at registration
time. On context switch the runstate is then updated with a direct
memcpy() into that mapping, while the existing copy_to_guest() path is
retained for areas registered by virtual address.
Signed-off-by: Andrii Anisov <andrii_anisov@xxxxxxxx>
---
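Not part of the commit message: a minimal sketch of how a guest could
exercise the new operation. Illustrative only; virt_to_phys() and
HYPERVISOR_vcpu_op() stand in for whatever the guest kernel actually
provides (spelled Linux-style here), and the header paths are likewise
assumptions.

    #include <xen/interface/vcpu.h>   /* vcpu_runstate_info, VCPUOP_* */
    #include <asm/xen/hypercall.h>    /* HYPERVISOR_vcpu_op() */
    #include <asm/io.h>               /* virt_to_phys() */

    /* Per-vCPU buffer which Xen will update directly once registered. */
    static struct vcpu_runstate_info runstate_buf;

    static int register_runstate_by_phys(unsigned int vcpu)
    {
        struct vcpu_register_runstate_memory_area area = {
            /* For this op, .addr.p carries a guest physical address. */
            .addr.p = virt_to_phys(&runstate_buf),
        };

        /*
         * The buffer must not cross a page boundary: map_runstate_area()
         * in the common code hunk below rejects such areas with -EINVAL.
         */
        return HYPERVISOR_vcpu_op(VCPUOP_register_runstate_phys_memory_area,
                                  vcpu, &area);
    }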
xen/arch/arm/domain.c | 62 +++++++++++++++++--------
xen/arch/x86/domain.c | 105 +++++++++++++++++++++++++++++++------------
xen/common/domain.c | 80 ++++++++++++++++++++++++++++++++-
xen/include/asm-arm/domain.h | 2 +
xen/include/xen/domain.h | 2 +
xen/include/xen/sched.h | 8 ++++
6 files changed, 210 insertions(+), 49 deletions(-)
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 6dc633e..8e24e63 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -275,32 +275,55 @@ static void ctxt_switch_to(struct vcpu *n)
}
/* Update per-VCPU guest runstate shared memory area (if registered). */
-static void update_runstate_area(struct vcpu *v)
+void update_runstate_area(struct vcpu *v)
{
- void __user *guest_handle = NULL;
+ if ( !guest_handle_is_null(runstate_guest(v)) )
+ {
+ void __user *guest_handle = NULL;
+ if ( VM_ASSIST(v->domain, runstate_update_flag) )
+ {
+ guest_handle = &v->runstate_guest.p->state_entry_time + 1;
+ guest_handle--;
+ v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+            __raw_copy_to_guest(guest_handle,
+                                (void *)(&v->runstate.state_entry_time + 1) - 1,
+                                1);
+ smp_wmb();
+ }
- if ( guest_handle_is_null(runstate_guest(v)) )
- return;
+ __copy_to_guest(runstate_guest(v), &v->runstate, 1);
- if ( VM_ASSIST(v->domain, runstate_update_flag) )
- {
- guest_handle = &v->runstate_guest.p->state_entry_time + 1;
- guest_handle--;
- v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&v->runstate.state_entry_time + 1) - 1,
-                            1);
- smp_wmb();
+ if ( guest_handle )
+ {
+ v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ smp_wmb();
+            __raw_copy_to_guest(guest_handle,
+                                (void *)(&v->runstate.state_entry_time + 1) - 1,
+                                1);
+ }
}
- __copy_to_guest(runstate_guest(v), &v->runstate, 1);
-
- if ( guest_handle )
+ spin_lock(&v->mapped_runstate_lock);
+ if ( v->mapped_runstate )
{
- v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
- smp_wmb();
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&v->runstate.state_entry_time + 1) - 1,
-                            1);
+ if ( VM_ASSIST(v->domain, runstate_update_flag) )
+ {
+ v->mapped_runstate->state_entry_time |= XEN_RUNSTATE_UPDATE;
+ smp_wmb();
+ v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+ }
+
+ memcpy(v->mapped_runstate, &v->runstate, sizeof(v->runstate));
+
+ if ( VM_ASSIST(v->domain, runstate_update_flag) )
+ {
+ v->mapped_runstate->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ smp_wmb();
+ v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ }
}
+ spin_unlock(&v->mapped_runstate_lock);
+
}
static void schedule_tail(struct vcpu *prev)
@@ -998,6 +1021,7 @@ long do_arm_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
{
case VCPUOP_register_vcpu_info:
case VCPUOP_register_runstate_memory_area:
+ case VCPUOP_register_runstate_phys_memory_area:
return do_vcpu_op(cmd, vcpuid, arg);
default:
return -EINVAL;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 9eaa978..46c2219 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1558,51 +1558,98 @@ void paravirt_ctxt_switch_to(struct vcpu *v)
wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
-/* Update per-VCPU guest runstate shared memory area (if registered). */
-bool update_runstate_area(struct vcpu *v)
+static void update_mapped_runstate_area_native(struct vcpu *v)
{
- bool rc;
- struct guest_memory_policy policy = { .nested_guest_mode = false };
- void __user *guest_handle = NULL;
-
- if ( guest_handle_is_null(runstate_guest(v)) )
- return true;
-
- update_guest_memory_policy(v, &policy);
-
if ( VM_ASSIST(v->domain, runstate_update_flag) )
{
- guest_handle = has_32bit_shinfo(v->domain)
- ? &v->runstate_guest.compat.p->state_entry_time + 1
- : &v->runstate_guest.native.p->state_entry_time + 1;
- guest_handle--;
v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+ v->mapped_runstate.native->state_entry_time |= XEN_RUNSTATE_UPDATE;
smp_wmb();
}
- if ( has_32bit_shinfo(v->domain) )
+ memcpy(v->mapped_runstate.native, &v->runstate, sizeof(v->runstate));
+
+ if ( VM_ASSIST(v->domain, runstate_update_flag) )
{
- struct compat_vcpu_runstate_info info;
+ v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ v->mapped_runstate.native->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ smp_wmb();
+ }
+}
- XLAT_vcpu_runstate_info(&info, &v->runstate);
- __copy_to_guest(v->runstate_guest.compat, &info, 1);
- rc = true;
+static void update_mapped_runstate_area_compat(struct vcpu *v)
+{
+ if ( VM_ASSIST(v->domain, runstate_update_flag) )
+ {
+ v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+ v->mapped_runstate.compat->state_entry_time |= XEN_RUNSTATE_UPDATE;
+ smp_wmb();
}
- else
- rc = __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
- sizeof(v->runstate);
- if ( guest_handle )
+ memcpy(v->mapped_runstate.compat, &v->runstate, sizeof(v->runstate));
+
+ if ( VM_ASSIST(v->domain, runstate_update_flag) )
{
v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ v->mapped_runstate.compat->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
smp_wmb();
-        __raw_copy_to_guest(guest_handle,
-                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
}
+}
- update_guest_memory_policy(v, &policy);
+/* Update per-VCPU guest runstate shared memory area (if registered). */
+bool update_runstate_area(struct vcpu *v)
+{
+ bool rc = true;
+
+ if ( !guest_handle_is_null(runstate_guest(v)) )
+ {
+ struct guest_memory_policy policy = { .nested_guest_mode = false };
+ void __user *guest_handle = NULL;
+
+ update_guest_memory_policy(v, &policy);
+ if ( VM_ASSIST(v->domain, runstate_update_flag) )
+ {
+ guest_handle = has_32bit_shinfo(v->domain)
+ ? &v->runstate_guest.compat.p->state_entry_time + 1
+ : &v->runstate_guest.native.p->state_entry_time + 1;
+ guest_handle--;
+ v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+            __raw_copy_to_guest(guest_handle,
+                                (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+ smp_wmb();
+ }
+
+ if ( has_32bit_shinfo(v->domain) )
+ {
+ struct compat_vcpu_runstate_info info;
+
+ XLAT_vcpu_runstate_info(&info, &v->runstate);
+ __copy_to_guest(v->runstate_guest.compat, &info, 1);
+ rc = true;
+ }
+ else
+ rc = __copy_to_guest(runstate_guest(v), &v->runstate, 1) !=
+ sizeof(v->runstate);
+
+ if ( guest_handle )
+ {
+ v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ smp_wmb();
+            __raw_copy_to_guest(guest_handle,
+                                (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+ }
+ update_guest_memory_policy(v, &policy);
+ }
+
+    spin_lock(&v->mapped_runstate_lock);
+    if ( v->mapped_runstate )
+    {
+        if ( has_32bit_shinfo(v->domain) )
+            update_mapped_runstate_area_compat(v);
+        else
+            update_mapped_runstate_area_native(v);
+    }
+    spin_unlock(&v->mapped_runstate_lock);
return rc;
}
diff --git a/xen/common/domain.c b/xen/common/domain.c
index ae22049..6df76c6 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -149,6 +149,7 @@ struct vcpu *vcpu_create(
v->dirty_cpu = VCPU_CPU_CLEAN;
spin_lock_init(&v->virq_lock);
+ spin_lock_init(&v->mapped_runstate_lock);
tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);
@@ -699,6 +700,69 @@ int rcu_lock_live_remote_domain_by_id(domid_t dom, struct domain **d)
return 0;
}
+static void _unmap_runstate_area(struct vcpu *v)
+{
+ mfn_t mfn;
+
+ if ( !v->mapped_runstate )
+ return;
+
+    mfn = domain_page_map_to_mfn(v->mapped_runstate);
+
+ unmap_domain_page_global((void *)
+ ((unsigned long)v->mapped_runstate &
+ PAGE_MASK));
+
+ v->mapped_runstate = NULL;
+ put_page_and_type(mfn_to_page(mfn));
+}
+
+static int map_runstate_area(struct vcpu *v,
+ struct vcpu_register_runstate_memory_area *area)
+{
+ unsigned long offset = area->addr.p & ~PAGE_MASK;
+ gfn_t gfn = gaddr_to_gfn(area->addr.p);
+ struct domain *d = v->domain;
+ void *mapping;
+ struct page_info *page;
+    size_t size = sizeof(struct vcpu_runstate_info);
+
+ if ( offset > (PAGE_SIZE - size) )
+ return -EINVAL;
+
+ page = get_page_from_gfn(d, gfn_x(gfn), NULL, P2M_ALLOC);
+ if ( !page )
+ return -EINVAL;
+
+ if ( !get_page_type(page, PGT_writable_page) )
+ {
+ put_page(page);
+ return -EINVAL;
+ }
+
+ mapping = __map_domain_page_global(page);
+
+ if ( mapping == NULL )
+ {
+ put_page_and_type(page);
+ return -ENOMEM;
+ }
+
+ spin_lock(&v->mapped_runstate_lock);
+ _unmap_runstate_area(v);
+ v->mapped_runstate = mapping + offset;
+ spin_unlock(&v->mapped_runstate_lock);
+
+ return 0;
+}
+
+static void unmap_runstate_area(struct vcpu *v)
+{
+ spin_lock(&v->mapped_runstate_lock);
+ _unmap_runstate_area(v);
+ spin_unlock(&v->mapped_runstate_lock);
+}
+
int domain_kill(struct domain *d)
{
int rc = 0;
@@ -737,7 +801,11 @@ int domain_kill(struct domain *d)
if ( cpupool_move_domain(d, cpupool0) )
return -ERESTART;
for_each_vcpu ( d, v )
+ {
+ set_xen_guest_handle(runstate_guest(v), NULL);
+ unmap_runstate_area(v);
unmap_vcpu_info(v);
+ }
d->is_dying = DOMDYING_dead;
/* Mem event cleanup has to go here because the rings
* have to be put before we call put_domain. */
@@ -1192,6 +1260,7 @@ int domain_soft_reset(struct domain *d)
for_each_vcpu ( d, v )
{
set_xen_guest_handle(runstate_guest(v), NULL);
+ unmap_runstate_area(v);
unmap_vcpu_info(v);
}
@@ -1536,8 +1605,17 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
}
case VCPUOP_register_runstate_phys_memory_area:
- rc = -EOPNOTSUPP;
+ {
+ struct vcpu_register_runstate_memory_area area;
+
+ rc = -EFAULT;
+ if ( copy_from_guest(&area, arg, 1) )
+ break;
+
+ rc = map_runstate_area(v, &area);
+
break;
+ }
#ifdef VCPU_TRAP_NMI
case VCPUOP_send_nmi:
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 312fec8..3fb6ea2 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -217,6 +217,8 @@ void vcpu_show_execution_state(struct vcpu *);
void vcpu_show_registers(const struct vcpu *);
void vcpu_switch_to_aarch64_mode(struct vcpu *);
+void update_runstate_area(struct vcpu *);
+
/*
* Due to the restriction of GICv3, the number of vCPUs in AFF0 is
* limited to 16, thus only the first 4 bits of AFF0 are legal. We will
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index d1bfc82..ecddcfe 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -118,4 +118,6 @@ struct vnuma_info {
void vnuma_destroy(struct vnuma_info *vnuma);
+struct vcpu_register_runstate_memory_area;
+
#endif /* __XEN_DOMAIN_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 748bb0f..2afe31c 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -163,15 +163,23 @@ struct vcpu
void *sched_priv; /* scheduler-specific data */
struct vcpu_runstate_info runstate;
+
+ spinlock_t mapped_runstate_lock;
+
#ifndef CONFIG_COMPAT
# define runstate_guest(v) ((v)->runstate_guest)
XEN_GUEST_HANDLE(vcpu_runstate_info_t) runstate_guest; /* guest address */
+ vcpu_runstate_info_t *mapped_runstate;
#else
# define runstate_guest(v) ((v)->runstate_guest.native)
union {
XEN_GUEST_HANDLE(vcpu_runstate_info_t) native;
XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t) compat;
} runstate_guest; /* guest address */
+    union {
+        vcpu_runstate_info_t *native;
+        vcpu_runstate_info_compat_t *compat;
+    } mapped_runstate; /* Xen-side mapping of the runstate area */
#endif
/* last time when vCPU is scheduled out */
--
2.7.4