[Xen-devel] [PATCH v3 2/2] Xen/mem_event: Prevent underflow of vcpu pause counts
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Tested-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
CC: Aravindh Puthiyaparambil <aravindp@xxxxxxxxx>

--
v3:
 * Newline on warning
v2:
 * Allow for multiple pause refcounts
---
 xen/arch/x86/hvm/hvm.c          |    2 +-
 xen/arch/x86/mm/mem_event.c     |   31 +++++++++++++++++++++++++++++++
 xen/arch/x86/mm/mem_sharing.c   |    4 ++--
 xen/arch/x86/mm/p2m.c           |    8 ++++----
 xen/include/asm-x86/mem_event.h |    3 +++
 xen/include/xen/sched.h         |    3 +++
 6 files changed, 44 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ef2411c..efd79b8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -6113,7 +6113,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
     if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
     {
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
     }

     req.gfn = value;
diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c
index 40ae841..ba7e71e 100644
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -663,6 +663,37 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
     return rc;
 }

+void mem_event_vcpu_pause(struct vcpu *v)
+{
+    ASSERT(v == current);
+
+    atomic_inc(&v->mem_event_pause_count);
+    vcpu_pause_nosync(v);
+}
+
+void mem_event_vcpu_unpause(struct vcpu *v)
+{
+    int old, new, prev = v->mem_event_pause_count.counter;
+
+    /* All unpause requests as a result of toolstack responses.  Prevent
+     * underflow of the vcpu pause count. */
+    do
+    {
+        old = prev;
+        new = old - 1;
+
+        if ( new < 0 )
+        {
+            printk(XENLOG_G_WARNING
+                   "%pv mem_event: Too many unpause attempts\n", v);
+            return;
+        }
+
+        prev = cmpxchg(&v->mem_event_pause_count.counter, old, new);
+    } while ( prev != old );
+
+    vcpu_unpause(v);
+}

 /*
  * Local variables:
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index ec99266..79188b9 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -568,7 +568,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
     if ( v->domain == d )
     {
         req.flags = MEM_EVENT_FLAG_VCPU_PAUSED;
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
     }

     req.p2mt = p2m_ram_shared;
@@ -609,7 +609,7 @@ int mem_sharing_sharing_resume(struct domain *d)

         /* Unpause domain/vcpu */
         if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-            vcpu_unpause(v);
+            mem_event_vcpu_unpause(v);
     }

     return 0;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index f213a39..2c7bc0f 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1158,7 +1158,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     /* Pause domain if request came from guest and gfn has paging type */
     if ( p2m_is_paging(p2mt) && v->domain == d )
     {
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
     }
     /* No need to inform pager if the gfn is not in the page-out path */
@@ -1319,7 +1319,7 @@ void p2m_mem_paging_resume(struct domain *d)
         }
         /* Unpause domain */
         if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-            vcpu_unpause(v);
+            mem_event_vcpu_unpause(v);
     }
 }

@@ -1414,7 +1414,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,

     /* Pause the current VCPU */
     if ( p2ma != p2m_access_n2rwx )
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);

     /* VCPU may be paused, return whether we promoted automatically */
     return (p2ma == p2m_access_n2rwx);
@@ -1440,7 +1440,7 @@ void p2m_mem_access_resume(struct domain *d)

         /* Unpause domain */
         if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-            vcpu_unpause(v);
+            mem_event_vcpu_unpause(v);
     }
 }

diff --git a/xen/include/asm-x86/mem_event.h b/xen/include/asm-x86/mem_event.h
index 045ef9b..ed4481a 100644
--- a/xen/include/asm-x86/mem_event.h
+++ b/xen/include/asm-x86/mem_event.h
@@ -66,6 +66,9 @@ int do_mem_event_op(int op, uint32_t domain, void *arg);
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                      XEN_GUEST_HANDLE_PARAM(void) u_domctl);

+void mem_event_vcpu_pause(struct vcpu *v);
+void mem_event_vcpu_unpause(struct vcpu *v);
+
 #endif /* __MEM_EVENT_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 2f876f5..62a4785 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -214,6 +214,9 @@ struct vcpu
     unsigned long    pause_flags;
     atomic_t         pause_count;

+    /* VCPU paused for mem_event replies. */
+    atomic_t         mem_event_pause_count;
+
     /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
     evtchn_port_t    virq_to_evtchn[NR_VIRQS];
     spinlock_t       virq_lock;
--
1.7.10.4
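
For readers unfamiliar with the construction, the mem_event_vcpu_unpause() loop above is the standard compare-and-swap "decrement only if positive" pattern: the count is re-read and the decrement retried until the cmpxchg succeeds, and a would-be underflow is refused with a warning instead of wrapping. Below is a minimal standalone sketch of the same idea, written against C11 atomics rather than Xen's atomic_t/cmpxchg; the names pause_count and try_unpause are illustrative only and not part of the patch.

/* Sketch of the decrement-if-positive pattern, assuming C11 <stdatomic.h>. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pause_count;

/* Returns 1 if the count was decremented, 0 if an underflow was refused. */
static int try_unpause(void)
{
    int old = atomic_load(&pause_count);

    do
    {
        if ( old <= 0 )
        {
            fprintf(stderr, "too many unpause attempts\n");
            return 0;
        }
        /* On failure, atomic_compare_exchange_weak reloads 'old' for us. */
    } while ( !atomic_compare_exchange_weak(&pause_count, &old, old - 1) );

    return 1;
}

int main(void)
{
    atomic_store(&pause_count, 2);   /* two outstanding pauses */
    printf("%d\n", try_unpause());   /* 1: count 2 -> 1 */
    printf("%d\n", try_unpause());   /* 1: count 1 -> 0 */
    printf("%d\n", try_unpause());   /* 0: refused, no underflow */
    return 0;
}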