[Xen-devel] [PATCH 4 of 5] Consume multiple mem event responses off the ring
 xen/arch/x86/mm/mem_event.c     |  10 +++++++-
 xen/arch/x86/mm/mem_sharing.c   |  13 +++++----
 xen/arch/x86/mm/p2m.c           |  52 +++++++++++++++++++++-------------------
 xen/include/asm-x86/mem_event.h |   2 +-
 4 files changed, 44 insertions(+), 33 deletions(-)

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>

diff -r 22b05954f754 -r c71f4c5b42f0 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -166,7 +166,7 @@ void mem_event_put_request(struct domain
     notify_via_xen_event_channel(d, med->xen_port);
 }
 
-void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp)
+int mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp)
 {
     mem_event_front_ring_t *front_ring;
     RING_IDX rsp_cons;
@@ -176,6 +176,12 @@ void mem_event_get_response(struct mem_e
     front_ring = &med->front_ring;
     rsp_cons = front_ring->rsp_cons;
 
+    if ( !RING_HAS_UNCONSUMED_RESPONSES(front_ring) )
+    {
+        mem_event_ring_unlock(med);
+        return 0;
+    }
+
     /* Copy response */
     memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
     rsp_cons++;
@@ -185,6 +191,8 @@ void mem_event_get_response(struct mem_e
     front_ring->sring->rsp_event = rsp_cons + 1;
 
     mem_event_ring_unlock(med);
+
+    return 1;
 }
 
 void mem_event_unpause_vcpus(struct domain *d)
diff -r 22b05954f754 -r c71f4c5b42f0 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -300,12 +300,13 @@ int mem_sharing_sharing_resume(struct do
 {
     mem_event_response_t rsp;
 
-    /* Get request off the ring */
-    mem_event_get_response(&d->mem_event->share, &rsp);
-
-    /* Unpause domain/vcpu */
-    if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+    /* Get all requests off the ring */
+    while ( mem_event_get_response(&d->mem_event->share, &rsp) )
+    {
+        /* Unpause domain/vcpu */
+        if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+            vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+    }
 
     return 0;
 }
diff -r 22b05954f754 -r c71f4c5b42f0 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1066,31 +1066,31 @@ void p2m_mem_paging_resume(struct domain
     p2m_access_t a;
     mfn_t mfn;
 
-    /* Pull the response off the ring */
-    mem_event_get_response(&d->mem_event->paging, &rsp);
-
-    /* Fix p2m entry if the page was not dropped */
-    if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
+    /* Pull all responses off the ring */
+    while( mem_event_get_response(&d->mem_event->paging, &rsp) )
     {
-        p2m_lock(p2m);
-        mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
-        /* Allow only pages which were prepared properly, or pages which
-         * were nominated but not evicted */
-        if ( mfn_valid(mfn) &&
-             (p2mt == p2m_ram_paging_in || p2mt == p2m_ram_paging_in_start) )
+        /* Fix p2m entry if the page was not dropped */
+        if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
         {
-            set_p2m_entry(p2m, rsp.gfn, mfn, PAGE_ORDER_4K,
-                          paging_mode_log_dirty(d) ? p2m_ram_logdirty : p2m_ram_rw,
-                          a);
-            set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
+            p2m_lock(p2m);
+            mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
+            /* Allow only pages which were prepared properly, or pages which
+             * were nominated but not evicted */
+            if ( mfn_valid(mfn) &&
+                 (p2mt == p2m_ram_paging_in || p2mt == p2m_ram_paging_in_start) )
+            {
+                set_p2m_entry(p2m, rsp.gfn, mfn, PAGE_ORDER_4K,
+                              paging_mode_log_dirty(d) ? p2m_ram_logdirty :
+                                                         p2m_ram_rw, a);
+                set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
+            }
+            p2m_unlock(p2m);
         }
-        p2m_unlock(p2m);
+        /* Unpause domain */
+        if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+            vcpu_unpause(d->vcpu[rsp.vcpu_id]);
     }
 
-    /* Unpause domain */
-    if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
-
     /* Unpause any domains that were paused because the ring was full */
     mem_event_unpause_vcpus(d);
 }
@@ -1174,11 +1174,13 @@ void p2m_mem_access_resume(struct domain
 {
     mem_event_response_t rsp;
 
-    mem_event_get_response(&d->mem_event->access, &rsp);
-
-    /* Unpause domain */
-    if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+    /* Pull all responses off the ring */
+    while( mem_event_get_response(&d->mem_event->access, &rsp) )
+    {
+        /* Unpause domain */
+        if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+            vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+    }
 
     /* Unpause any domains that were paused because the ring was full or no listener
      * was available */
diff -r 22b05954f754 -r c71f4c5b42f0 xen/include/asm-x86/mem_event.h
--- a/xen/include/asm-x86/mem_event.h
+++ b/xen/include/asm-x86/mem_event.h
@@ -29,7 +29,7 @@ void mem_event_mark_and_pause(struct vcp
 int mem_event_check_ring(struct domain *d, struct mem_event_domain *med);
 void mem_event_put_req_producers(struct mem_event_domain *med);
 void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req);
-void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp);
+int mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp);
 void mem_event_unpause_vcpus(struct domain *d);
 
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
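For readers less familiar with the shared-ring protocol, the consumer-side change above amounts to turning each one-shot mem_event_get_response() call into a loop that drains every unconsumed response before returning. The standalone C sketch below is a minimal model of that pattern with a simplified ring; the toy_* names, the fixed-size array, and the single flag bit are illustrative assumptions, not the Xen ring macros (RING_HAS_UNCONSUMED_RESPONSES / RING_GET_RESPONSE) that the patch itself uses.

/*
 * Standalone sketch (not Xen code): models the "drain the ring" pattern
 * this patch introduces.  All names below are illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8

struct toy_response {
    unsigned int vcpu_id;
    unsigned int flags;      /* e.g. a "vcpu was paused" bit */
};

struct toy_ring {
    struct toy_response ring[RING_SIZE];
    unsigned int rsp_prod;   /* advanced by the producer */
    unsigned int rsp_cons;   /* advanced by the consumer below */
};

/* Returns 1 and fills *rsp if a response was consumed, 0 if the ring is empty. */
static int toy_get_response(struct toy_ring *r, struct toy_response *rsp)
{
    if (r->rsp_cons == r->rsp_prod)
        return 0;                       /* nothing unconsumed */

    *rsp = r->ring[r->rsp_cons % RING_SIZE];
    r->rsp_cons++;
    return 1;
}

/* The pattern every resume path moves to: loop until the ring is empty. */
static void toy_resume(struct toy_ring *r)
{
    struct toy_response rsp;

    while (toy_get_response(r, &rsp)) {
        if (rsp.flags & 0x1)            /* stand-in for MEM_EVENT_FLAG_VCPU_PAUSED */
            printf("unpause vcpu %u\n", rsp.vcpu_id);
    }
}

int main(void)
{
    struct toy_ring r;
    memset(&r, 0, sizeof(r));

    /* Producer queues three responses before the consumer runs once. */
    for (unsigned int i = 0; i < 3; i++) {
        r.ring[r.rsp_prod % RING_SIZE] =
            (struct toy_response){ .vcpu_id = i, .flags = 0x1 };
        r.rsp_prod++;
    }

    toy_resume(&r);                     /* drains all three in one call */
    return 0;
}

The point the patch captures is the same: a resume path may be invoked once while several responses are already queued, so it must keep consuming until the producer and consumer indices meet rather than handling a single response per wakeup.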