Re: [Xen-devel] [PATCH v5] common/vm_event: Initialize vm_event lists on domain creation
On Tue, Aug 29, 2017 at 05:17:05PM +0300, Alexandru Isaila wrote:
[...]
>
> /**
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index b22aacc..30f507b 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -363,9 +363,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
> poolid = 0;
>
> err = -ENOMEM;
> - d->vm_event = xzalloc(struct vm_event_per_domain);
> - if ( !d->vm_event )
> - goto fail;
>
> d->pbuf = xzalloc_array(char, DOMAIN_PBUF_SIZE);
> if ( !d->pbuf )
> @@ -403,7 +400,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
> if ( hardware_domain == d )
> hardware_domain = old_hwdom;
> atomic_set(&d->refcnt, DOMAIN_DESTROYED);
> - xfree(d->vm_event);
> xfree(d->pbuf);
> if ( init_status & INIT_arch )
> arch_domain_destroy(d);
> @@ -820,7 +816,14 @@ static void complete_domain_destroy(struct rcu_head *head)
> free_xenoprof_pages(d);
> #endif
>
> - xfree(d->vm_event);
> +#ifdef CONFIG_HAS_MEM_PAGING
> + xfree(d->vm_event_paging);
> +#endif
> + xfree(d->vm_event_monitor);
Why do you unconditionally xfree vm_event_monitor here when you no
longer unconditionally allocate it?
Not that this is strictly a bug, but it deviates from the original
behaviour.
> +#ifdef CONFIG_HAS_MEM_SHARING
> + xfree(d->vm_event_share);
> +#endif
> +
> xfree(d->pbuf);
>
> for ( i = d->max_vcpus - 1; i >= 0; i-- )
> diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
> index 19f63bb..1bf6824 100644
> --- a/xen/common/mem_access.c
> +++ b/xen/common/mem_access.c
> @@ -52,7 +52,7 @@ int mem_access_memop(unsigned long cmd,
> goto out;
>
> rc = -ENODEV;
> - if ( unlikely(!d->vm_event->monitor.ring_page) )
> + if ( unlikely(!vm_event_check_ring(d->vm_event_monitor)) )
> goto out;
>
> switch ( mao.op )
> diff --git a/xen/common/monitor.c b/xen/common/monitor.c
> index 451f42f..70d38d4 100644
> --- a/xen/common/monitor.c
> +++ b/xen/common/monitor.c
> @@ -92,7 +92,7 @@ int monitor_traps(struct vcpu *v, bool_t sync, vm_event_request_t *req)
> int rc;
> struct domain *d = v->domain;
>
> - rc = vm_event_claim_slot(d, &d->vm_event->monitor);
> + rc = vm_event_claim_slot(d, d->vm_event_monitor);
> switch ( rc )
> {
> case 0:
> @@ -123,7 +123,7 @@ int monitor_traps(struct vcpu *v, bool_t sync, vm_event_request_t *req)
> }
>
> vm_event_fill_regs(req);
> - vm_event_put_request(d, &d->vm_event->monitor, req);
> + vm_event_put_request(d, d->vm_event_monitor, req);
>
> return rc;
> }
> diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
> index 9291db6..a9b47e2 100644
> --- a/xen/common/vm_event.c
> +++ b/xen/common/vm_event.c
> @@ -42,7 +42,7 @@
> static int vm_event_enable(
> struct domain *d,
> xen_domctl_vm_event_op_t *vec,
> - struct vm_event_domain *ved,
> + struct vm_event_domain **ved,
> int pause_flag,
> int param,
> xen_event_channel_notification_t notification_fn)
> @@ -50,32 +50,37 @@ static int vm_event_enable(
> int rc;
> unsigned long ring_gfn = d->arch.hvm_domain.params[param];
>
> + if ( !*ved )
> + (*ved) = xzalloc(struct vm_event_domain);
Unnecessary parentheses.
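I.e. simply:

    *ved = xzalloc(struct vm_event_domain);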
> + if ( !*ved )
> + return -ENOMEM;
> +
> /* Only one helper at a time. If the helper crashed,
> * the ring is in an undefined state and so is the guest.
> */
> - if ( ved->ring_page )
> - return -EBUSY;
> + if ( (*ved)->ring_page )
> + return -EBUSY;
>
> /* The parameter defaults to zero, and it should be
> * set to something */
> if ( ring_gfn == 0 )
> return -ENOSYS;
>
> - vm_event_ring_lock_init(ved);
> - vm_event_ring_lock(ved);
> + vm_event_ring_lock_init(*ved);
> + vm_event_ring_lock(*ved);
>
> rc = vm_event_init_domain(d);
>
> if ( rc < 0 )
> goto err;
>
> - rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
> - &ved->ring_page);
> + rc = prepare_ring_for_helper(d, ring_gfn, &(*ved)->ring_pg_struct,
> + &(*ved)->ring_page);
Since you are modifying this, please align the second line to "d".
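I.e.:

    rc = prepare_ring_for_helper(d, ring_gfn, &(*ved)->ring_pg_struct,
                                 &(*ved)->ring_page);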
> if ( rc < 0 )
> goto err;
>
> /* Set the number of currently blocked vCPUs to 0. */
> - ved->blocked = 0;
> + (*ved)->blocked = 0;
>
> /* Allocate event channel */
> rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
> @@ -83,26 +88,28 @@ static int vm_event_enable(
> if ( rc < 0 )
> goto err;
>
> - ved->xen_port = vec->port = rc;
> + (*ved)->xen_port = vec->port = rc;
>
> /* Prepare ring buffer */
> - FRONT_RING_INIT(&ved->front_ring,
> - (vm_event_sring_t *)ved->ring_page,
> + FRONT_RING_INIT(&(*ved)->front_ring,
> + (vm_event_sring_t *)(*ved)->ring_page,
> PAGE_SIZE);
>
> /* Save the pause flag for this particular ring. */
> - ved->pause_flag = pause_flag;
> + (*ved)->pause_flag = pause_flag;
>
> /* Initialize the last-chance wait queue. */
> - init_waitqueue_head(&ved->wq);
> + init_waitqueue_head(&(*ved)->wq);
>
> - vm_event_ring_unlock(ved);
> + vm_event_ring_unlock((*ved));
Unnecessary parentheses.
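I.e. vm_event_ring_unlock(*ved);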
> return 0;
>
> err:
> - destroy_ring_for_helper(&ved->ring_page,
> - ved->ring_pg_struct);
> - vm_event_ring_unlock(ved);
> + destroy_ring_for_helper(&(*ved)->ring_page,
> + (*ved)->ring_pg_struct);
> + vm_event_ring_unlock((*ved));
> + xfree(*ved);
> + *ved = NULL;
>
> return rc;
> }
> @@ -187,41 +194,44 @@ void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
> vm_event_wake_blocked(d, ved);
> }
>
> -static int vm_event_disable(struct domain *d, struct vm_event_domain *ved)
> +static int vm_event_disable(struct domain *d, struct vm_event_domain **ved)
> {
A lot of the code churn here and above could be avoided by changing ved
in the parameter list to something else (vedp?) and having a local
variable:
    struct vm_event_domain *ved = *vedp;
(I don't feel very strongly about this, though.)
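Roughly the shape that would give (just a sketch, not part of the
posted patch):

    static int vm_event_disable(struct domain *d,
                                struct vm_event_domain **vedp)
    {
        struct vm_event_domain *ved = *vedp;

        if ( vm_event_check_ring(ved) )
        {
            vm_event_ring_lock(ved);
            /* ... rest of the body as it is today, using plain "ved" ... */
            vm_event_ring_unlock(ved);
        }

        xfree(ved);
        *vedp = NULL;

        return 0;
    }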
> - if ( ved->ring_page )
> + if ( vm_event_check_ring(*ved) )
> {
> struct vcpu *v;
>
> - vm_event_ring_lock(ved);
> + vm_event_ring_lock(*ved);
>
> - if ( !list_empty(&ved->wq.list) )
> + if ( !list_empty(&(*ved)->wq.list) )
> {
> - vm_event_ring_unlock(ved);
> + vm_event_ring_unlock(*ved);
> return -EBUSY;
> }
>
> /* Free domU's event channel and leave the other one unbound */
> - free_xen_event_channel(d, ved->xen_port);
> + free_xen_event_channel(d, (*ved)->xen_port);
>
> /* Unblock all vCPUs */
> for_each_vcpu ( d, v )
> {
> - if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
> + if ( test_and_clear_bit((*ved)->pause_flag, &v->pause_flags) )
> {
> vcpu_unpause(v);
> - ved->blocked--;
> + (*ved)->blocked--;
> }
> }
>
> - destroy_ring_for_helper(&ved->ring_page,
> - ved->ring_pg_struct);
> + destroy_ring_for_helper(&(*ved)->ring_page,
> + (*ved)->ring_pg_struct);
>
> vm_event_cleanup_domain(d);
>
> - vm_event_ring_unlock(ved);
> + vm_event_ring_unlock(*ved);
> }
>
> + xfree(*ved);
> + *ved = NULL;
> +
> return 0;
> }
>
> @@ -267,6 +277,9 @@ void vm_event_put_request(struct domain *d,
> RING_IDX req_prod;
> struct vcpu *curr = current;
>
> + if( !vm_event_check_ring(ved))
> + return;
> +
> if ( curr->domain != d )
> {
> req->flags |= VM_EVENT_FLAG_FOREIGN;
> @@ -434,6 +447,9 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
>
> void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
> {
> + if( !vm_event_check_ring(ved) )
> + return;
> +
> vm_event_ring_lock(ved);
> vm_event_release_slot(d, ved);
> vm_event_ring_unlock(ved);
> @@ -482,7 +498,7 @@ static int vm_event_wait_slot(struct vm_event_domain *ved)
>
> bool_t vm_event_check_ring(struct vm_event_domain *ved)
> {
> - return (ved->ring_page != NULL);
> + return (ved != NULL && ved->ring_page != NULL);
ved && ved->ring_page
> }
>
> /*
[...]
> diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
> index 27bdb71..391c473 100644
> --- a/xen/drivers/passthrough/pci.c
> +++ b/xen/drivers/passthrough/pci.c
> @@ -21,6 +21,7 @@
> #include <xen/prefetch.h>
> #include <xen/iommu.h>
> #include <xen/irq.h>
> +#include <xen/vm_event.h>
> #include <asm/hvm/irq.h>
> #include <xen/delay.h>
> #include <xen/keyhandler.h>
> @@ -1365,7 +1366,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
> * enabled for this domain */
> if ( unlikely(!need_iommu(d) &&
> (d->arch.hvm_domain.mem_sharing_enabled ||
> - d->vm_event->paging.ring_page ||
> + vm_event_check_ring(d->vm_event_paging) ||
> p2m_get_hostp2m(d)->global_logdirty)) )
> return -EXDEV;
>
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index 6673b27..e48487c 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -295,16 +295,6 @@ struct vm_event_domain
> unsigned int last_vcpu_wake_up;
> };
>
> -struct vm_event_per_domain
> -{
> - /* Memory sharing support */
> - struct vm_event_domain share;
> - /* Memory paging support */
> - struct vm_event_domain paging;
> - /* VM event monitor support */
> - struct vm_event_domain monitor;
> -};
> -
> struct evtchn_port_ops;
>
> enum guest_type {
> @@ -464,7 +454,13 @@ struct domain
> struct lock_profile_qhead profile_head;
>
> /* Various vm_events */
> - struct vm_event_per_domain *vm_event;
> +
> + /* Memory sharing support */
> + struct vm_event_domain *vm_event_share;
> + /* Memory paging support */
> + struct vm_event_domain *vm_event_paging;
> + /* VM event monitor support */
> + struct vm_event_domain *vm_event_monitor;
Please consider placing these inside the corresponding CONFIG options,
as you did above.
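Something along these lines (a sketch, using the same CONFIG symbols as
in complete_domain_destroy() above):

    /* Various vm_events */

#ifdef CONFIG_HAS_MEM_SHARING
    /* Memory sharing support */
    struct vm_event_domain *vm_event_share;
#endif
#ifdef CONFIG_HAS_MEM_PAGING
    /* Memory paging support */
    struct vm_event_domain *vm_event_paging;
#endif
    /* VM event monitor support */
    struct vm_event_domain *vm_event_monitor;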
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel