|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] Revert "x86/HVM: fix preemption handling in do_hvm_op()"
commit 16b8b8551ee810639bc110d83f648aaae3ca2b32
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Apr 1 10:41:29 2014 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Apr 1 10:41:29 2014 +0200
Revert "x86/HVM: fix preemption handling in do_hvm_op()"
This reverts commit 8bad6c5626129ffba04dbab3a38115b6f3669596
(clearly broken for 32-bit callers, reportedly broken also
for 64-bit Dom0 with qemu-trad).
---
xen/arch/x86/hvm/hvm.c | 77 ++++++++++++++++++++++-----------------
xen/include/public/hvm/hvm_op.h | 8 ++--
2 files changed, 48 insertions(+), 37 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 15bf7b7..5e89cf5 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4072,25 +4072,20 @@ static int hvm_replace_event_channel(struct vcpu *v,
domid_t remote_domid,
return 0;
}
-#define HVMOP_op_bits 32
-
long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct domain *curr_d = current->domain;
- unsigned long start_iter = op >> HVMOP_op_bits;
long rc = 0;
- switch ( op &= ((1UL << HVMOP_op_bits) - 1) )
+ switch ( op )
{
- struct domain *d;
- unsigned long pfn;
-
case HVMOP_set_param:
case HVMOP_get_param:
{
struct xen_hvm_param a;
struct hvm_ioreq_page *iorp;
+ struct domain *d;
struct vcpu *v;
if ( copy_from_guest(&a, arg, 1) )
@@ -4354,6 +4349,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
case HVMOP_track_dirty_vram:
{
struct xen_hvm_track_dirty_vram a;
+ struct domain *d;
if ( copy_from_guest(&a, arg, 1) )
return -EFAULT;
@@ -4394,6 +4390,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
case HVMOP_modified_memory:
{
struct xen_hvm_modified_memory a;
+ struct domain *d;
if ( copy_from_guest(&a, arg, 1) )
return -EFAULT;
@@ -4411,8 +4408,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
goto param_fail3;
rc = -EINVAL;
- if ( a.nr < start_iter ||
- ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+ if ( ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
goto param_fail3;
@@ -4420,8 +4416,9 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
if ( !paging_mode_log_dirty(d) )
goto param_fail3;
- for ( pfn = a.first_pfn + start_iter; a.nr > start_iter; ++pfn )
+ while ( a.nr > 0 )
{
+ unsigned long pfn = a.first_pfn;
struct page_info *page;
page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
@@ -4434,13 +4431,16 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
put_page(page);
}
- ++pfn;
- ++start_iter;
+ a.first_pfn++;
+ a.nr--;
/* Check for continuation if it's not the last iteration */
- if ( a.nr > start_iter && hypercall_preempt_check() )
+ if ( a.nr > 0 && hypercall_preempt_check() )
{
- rc = -EAGAIN;
+ if ( __copy_to_guest(arg, &a, 1) )
+ rc = -EFAULT;
+ else
+ rc = -EAGAIN;
break;
}
}
@@ -4453,6 +4453,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
case HVMOP_get_mem_type:
{
struct xen_hvm_get_mem_type a;
+ struct domain *d;
p2m_type_t t;
if ( copy_from_guest(&a, arg, 1) )
@@ -4496,6 +4497,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
case HVMOP_set_mem_type:
{
struct xen_hvm_set_mem_type a;
+ struct domain *d;
/* Interface types to internal p2m types */
static const p2m_type_t memtype[] = {
@@ -4520,19 +4522,20 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
goto param_fail4;
rc = -EINVAL;
- if ( a.nr < start_iter ||
- ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+ if ( ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
goto param_fail4;
if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
goto param_fail4;
- for ( pfn = a.first_pfn + start_iter; a.nr > start_iter; ++pfn )
+ while ( a.nr )
{
- p2m_type_t t, nt;
-
- get_gfn_unshare(d, pfn, &t);
+ unsigned long pfn = a.first_pfn;
+ p2m_type_t t;
+ p2m_type_t nt;
+ mfn_t mfn;
+ mfn = get_gfn_unshare(d, pfn, &t);
if ( p2m_is_paging(t) )
{
put_gfn(d, pfn);
@@ -4569,13 +4572,16 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
}
put_gfn(d, pfn);
- ++pfn;
- ++start_iter;
+ a.first_pfn++;
+ a.nr--;
/* Check for continuation if it's not the last iteration */
- if ( a.nr > start_iter && hypercall_preempt_check() )
+ if ( a.nr > 0 && hypercall_preempt_check() )
{
- rc = -EAGAIN;
+ if ( __copy_to_guest(arg, &a, 1) )
+ rc = -EFAULT;
+ else
+ rc = -EAGAIN;
goto param_fail4;
}
}
@@ -4590,6 +4596,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
case HVMOP_set_mem_access:
{
struct xen_hvm_set_mem_access a;
+ struct domain *d;
if ( copy_from_guest(&a, arg, 1) )
return -EFAULT;
@@ -4608,17 +4615,19 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
rc = -EINVAL;
if ( (a.first_pfn != ~0ull) &&
- (a.nr < start_iter ||
- ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+ (((a.first_pfn + a.nr - 1) < a.first_pfn) ||
((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d))) )
goto param_fail5;
- rc = p2m_set_mem_access(d, a.first_pfn + start_iter, a.nr - start_iter,
- a.hvmmem_access);
+ rc = p2m_set_mem_access(d, a.first_pfn, a.nr, a.hvmmem_access);
if ( rc > 0 )
{
- start_iter = a.nr - rc;
- rc = -EAGAIN;
+ a.first_pfn += a.nr - rc;
+ a.nr = rc;
+ if ( __copy_to_guest(arg, &a, 1) )
+ rc = -EFAULT;
+ else
+ rc = -EAGAIN;
}
param_fail5:
@@ -4629,6 +4638,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
case HVMOP_get_mem_access:
{
struct xen_hvm_get_mem_access a;
+ struct domain *d;
hvmmem_access_t access;
if ( copy_from_guest(&a, arg, 1) )
@@ -4665,6 +4675,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
case HVMOP_pagetable_dying:
{
struct xen_hvm_pagetable_dying a;
+ struct domain *d;
if ( copy_from_guest(&a, arg, 1) )
return -EFAULT;
@@ -4717,6 +4728,7 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
case HVMOP_inject_trap:
{
xen_hvm_inject_trap_t tr;
+ struct domain *d;
struct vcpu *v;
if ( copy_from_guest(&tr, arg, 1 ) )
@@ -4764,9 +4776,8 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
}
if ( rc == -EAGAIN )
- rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
- op | (start_iter << HVMOP_op_bits),
- arg);
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_hvm_op, "lh", op, arg);
return rc;
}
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 3204ec4..a9aab4b 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -90,10 +90,10 @@ typedef enum {
struct xen_hvm_track_dirty_vram {
/* Domain to be tracked. */
domid_t domid;
- /* Number of pages to track. */
- uint32_t nr;
/* First pfn to track. */
uint64_aligned_t first_pfn;
+ /* Number of pages to track. */
+ uint64_aligned_t nr;
/* OUT variable. */
/* Dirty bitmap buffer. */
XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
@@ -106,10 +106,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
struct xen_hvm_modified_memory {
/* Domain to be updated. */
domid_t domid;
- /* Number of pages. */
- uint32_t nr;
/* First pfn. */
uint64_aligned_t first_pfn;
+ /* Number of pages. */
+ uint64_aligned_t nr;
};
typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |