
Re: [Xen-devel] [PATCH v3 2/2] Xen/mem_event: Prevent underflow of vcpu pause counts



On Fri, Jul 18, 2014 at 9:53 AM, Andrew Cooper <andrew.cooper3@xxxxxxxxxx> wrote:
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Tested-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
CC: Aravindh Puthiyaparambil <aravindp@xxxxxxxxx>
Reviewed-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>

Couple nits below ...

--
v3:
 * Newline on warning
v2:
 * Allow for multiple pause refcounts
---
 xen/arch/x86/hvm/hvm.c          |    2 +-
 xen/arch/x86/mm/mem_event.c     |   31 +++++++++++++++++++++++++++++++
 xen/arch/x86/mm/mem_sharing.c   |    4 ++--
 xen/arch/x86/mm/p2m.c           |    8 ++++----
 xen/include/asm-x86/mem_event.h |    3 +++
 xen/include/xen/sched.h         |    3 +++
 6 files changed, 44 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ef2411c..efd79b8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -6113,7 +6113,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
     if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
     {
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
     }

     req.gfn = value;
diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c
index 40ae841..ba7e71e 100644
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -663,6 +663,37 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
     return rc;
 }

+void mem_event_vcpu_pause(struct vcpu *v)
+{
+    ASSERT(v == current);
+
+    atomic_inc(&v->mem_event_pause_count);

Nit #1: A buggy toolstack going into an infinite loop could overflow this (see the sketch after this hunk).

+    vcpu_pause_nosync(v);
+}
+
+void mem_event_vcpu_unpause(struct vcpu *v)
+{
+    int old, new, prev = v->mem_event_pause_count.counter;

Nit #2: It's not fresh in my mind whether atomic_t is supposed to be signed or unsigned. The flawless comparison below would be to take these all as unsigned and check for new > old (see the sketch after this hunk).

Thanks
Andres
+
+    /* All unpause requests as a result of toolstack responses.  Prevent
+     * underflow of the vcpu pause count. */
+    do
+    {
+        old = prev;
+        new = old - 1;
+
+        if ( new < 0 )
+        {
+            printk(XENLOG_G_WARNING
+                   "%pv mem_event: Too many unpause attempts\n", v);
+            return;
+        }
+
+        prev = cmpxchg(&v->mem_event_pause_count.counter, old, new);
+    } while ( prev != old );
+
+    vcpu_unpause(v);
+}

 /*
  * Local variables:
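
To make the two nits concrete, here is a rough sketch of what they could look like if addressed (not a tested patch against the tree; the 0xffff threshold is purely illustrative, and the unsigned wraparound check is the alternative suggested in Nit #2):

void mem_event_vcpu_pause(struct vcpu *v)
{
    ASSERT(v == current);

    /* Illustrative guard for Nit #1: the threshold is arbitrary; the point
     * is only to notice a caller that pauses far more often than it ever
     * unpauses, instead of silently wrapping the counter. */
    if ( atomic_read(&v->mem_event_pause_count) > 0xffff )
        printk(XENLOG_G_WARNING
               "%pv mem_event: suspiciously high pause count\n", v);

    atomic_inc(&v->mem_event_pause_count);
    vcpu_pause_nosync(v);
}

void mem_event_vcpu_unpause(struct vcpu *v)
{
    /* Nit #2: treat the counter as unsigned, so underflow shows up as
     * new > old after the decrement regardless of atomic_t's signedness. */
    unsigned int old, new, prev = v->mem_event_pause_count.counter;

    /* All unpause requests come from toolstack responses; refuse to drop
     * the count below zero. */
    do
    {
        old = prev;
        new = old - 1;

        if ( new > old )
        {
            printk(XENLOG_G_WARNING
                   "%pv mem_event: Too many unpause attempts\n", v);
            return;
        }

        prev = cmpxchg(&v->mem_event_pause_count.counter, old, new);
    } while ( prev != old );

    vcpu_unpause(v);
}
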
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index ec99266..79188b9 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -568,7 +568,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
     if ( v->domain == d )
     {
         req.flags = MEM_EVENT_FLAG_VCPU_PAUSED;
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
     }

     req.p2mt = p2m_ram_shared;
@@ -609,7 +609,7 @@ int mem_sharing_sharing_resume(struct domain *d)

         /* Unpause domain/vcpu */
         if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-            vcpu_unpause(v);
+            mem_event_vcpu_unpause(v);
     }

     return 0;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index f213a39..2c7bc0f 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1158,7 +1158,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     /* Pause domain if request came from guest and gfn has paging type */
     if ( p2m_is_paging(p2mt) && v->domain == d )
     {
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
     }
     /* No need to inform pager if the gfn is not in the page-out path */
@@ -1319,7 +1319,7 @@ void p2m_mem_paging_resume(struct domain *d)
         }
         /* Unpause domain */
         if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-            vcpu_unpause(v);
+            mem_event_vcpu_unpause(v);
     }
 }

@@ -1414,7 +1414,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,

     /* Pause the current VCPU */
     if ( p2ma != p2m_access_n2rwx )
-        vcpu_pause_nosync(v);
+        mem_event_vcpu_pause(v);

     /* VCPU may be paused, return whether we promoted automatically */
     return (p2ma == p2m_access_n2rwx);
@@ -1440,7 +1440,7 @@ void p2m_mem_access_resume(struct domain *d)

         /* Unpause domain */
         if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-            vcpu_unpause(v);
+            mem_event_vcpu_unpause(v);
     }
 }

diff --git a/xen/include/asm-x86/mem_event.h b/xen/include/asm-x86/mem_event.h
index 045ef9b..ed4481a 100644
--- a/xen/include/asm-x86/mem_event.h
+++ b/xen/include/asm-x86/mem_event.h
@@ -66,6 +66,9 @@ int do_mem_event_op(int op, uint32_t domain, void *arg);
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                      XEN_GUEST_HANDLE_PARAM(void) u_domctl);

+void mem_event_vcpu_pause(struct vcpu *v);
+void mem_event_vcpu_unpause(struct vcpu *v);
+
 #endif /* __MEM_EVENT_H__ */


diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 2f876f5..62a4785 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -214,6 +214,9 @@ struct vcpu
     unsigned long    pause_flags;
     atomic_t         pause_count;

+    /* VCPU paused for mem_event replies. */
+    atomic_t         mem_event_pause_count;
+
     /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
     evtchn_port_t    virq_to_evtchn[NR_VIRQS];
     spinlock_t       virq_lock;
--
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 

