Re: [Xen-devel] [PATCH RFC 5/6] xen-access: add support for slotted channel vm_events
On 11/28/18 5:29 PM, Petre Pircalabu wrote:
> Signed-off-by: Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
> ---
> tools/tests/xen-access/xen-access.c | 510 ++++++++++++++++++++++++++++--------
> 1 file changed, 408 insertions(+), 102 deletions(-)
>
> diff --git a/tools/tests/xen-access/xen-access.c b/tools/tests/xen-access/xen-access.c
> index 6aaee16..e44708d 100644
> --- a/tools/tests/xen-access/xen-access.c
> +++ b/tools/tests/xen-access/xen-access.c
> @@ -62,13 +62,33 @@
> /* From xen/include/asm-x86/x86-defns.h */
> #define X86_CR4_PGE 0x00000080 /* enable global pages */
>
> -typedef struct vm_event {
> - domid_t domain_id;
> +#ifndef round_pgup
> +#define round_pgup(p) (((p) + (XC_PAGE_SIZE - 1)) & XC_PAGE_MASK)
> +#endif /* round_pgup */
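(Aside: round_pgup() rounds a byte count up to the next page boundary, e.g. with 4K pages round_pgup(0x1234) == 0x2000; presumably it is what sizes the multi-page buffer mapping that page_count below refers to.)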
> +
> +struct vm_event_ring
> +{
> xenevtchn_handle *xce_handle;
> int port;
> vm_event_back_ring_t back_ring;
> uint32_t evtchn_port;
> - void *ring_page;
> + void *buffer;
> + unsigned int page_count;
> +};
> +
> +struct vm_event_channel
> +{
> + xenevtchn_handle **xce_handles;
> + int *ports;
> + uint32_t *evtchn_ports;
> + void *buffer;
> +};
> +
> +typedef struct vm_event {
> + domid_t domain_id;
> + unsigned int num_vcpus;
> + struct vm_event_ring *ring;
> + struct vm_event_channel *channel;
> } vm_event_t;
>
> typedef struct xenaccess {
> @@ -79,6 +99,7 @@ typedef struct xenaccess {
> vm_event_t vm_event;
> } xenaccess_t;
>
> +
> static int interrupted;
> bool evtchn_bind = 0, evtchn_open = 0, mem_access_enable = 0;
>
> @@ -87,45 +108,181 @@ static void close_handler(int sig)
> interrupted = sig;
> }
>
> -int xc_wait_for_event_or_timeout(xc_interface *xch, xenevtchn_handle *xce, unsigned long ms)
> +static int xenaccess_wait_for_events(xenaccess_t *xenaccess,
> + int **_ports,
> + unsigned long ms)
> {
> - struct pollfd fd = { .fd = xenevtchn_fd(xce), .events = POLLIN | POLLERR };
> - int port;
> - int rc;
> + struct pollfd *fds;
> + vm_event_t *vm_event;
> + int rc, fd_count = 0, i = 0, found = 0;
> + int *ports;
> +
> + vm_event = &xenaccess->vm_event;
>
> - rc = poll(&fd, 1, ms);
> - if ( rc == -1 )
> + fd_count = ((vm_event->channel) ? vm_event->num_vcpus : 0) + 1;
> +
> + fds = calloc(fd_count, sizeof(struct pollfd));
> +
> + if ( vm_event->channel )
> {
> - if (errno == EINTR)
> - return 0;
> + for (i = 0; i < vm_event->num_vcpus; i++ )
> + {
> + fds[i].fd = xenevtchn_fd(vm_event->channel->xce_handles[i]);
> + fds[i].events = POLLIN | POLLERR;
> + fds[i].revents = 0;
> + }
> + }
>
> - ERROR("Poll exited with an error");
> - goto err;
> + fds[i].fd = xenevtchn_fd(vm_event->ring->xce_handle);
> + fds[i].events = POLLIN | POLLERR;
> + fds[i].revents = 0;
> +
> + rc = poll(fds, fd_count, ms);
> + if ( rc == -1 || rc == 0 )
> + {
> + if ( errno == EINTR )
> + rc = 0;
> + goto cleanup;
> }
>
> - if ( rc == 1 )
> + ports = malloc(rc * sizeof(int));
> +
> + for ( i = 0; i < fd_count ; i++ )
> {
> - port = xenevtchn_pending(xce);
> - if ( port == -1 )
> + if ( fds[i].revents & POLLIN )
> {
> - ERROR("Failed to read port from event channel");
> - goto err;
> + xenevtchn_handle *xce = (i == (fd_count-1)) ? vm_event->ring->xce_handle :
> + vm_event->channel->xce_handles[i];
> + int port = xenevtchn_pending(xce);
> +
> + if ( port == -1 )
> + {
> + ERROR("Failed to read port from event channel");
> + rc = -1;
> + goto cleanup;
> + }
> +
> + if ( xenevtchn_unmask(xce, port) )
> + {
> + ERROR("Failed to unmask event channel port");
> + rc = -1;
> + goto cleanup;
> + }
> +
> + ports[found++] = port;
> }
> + }
>
> - rc = xenevtchn_unmask(xce, port);
> - if ( rc != 0 )
> + *_ports = ports;
> +
> +cleanup:
> + free(fds);
> + return rc;
> +}
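This is essentially the old xc_wait_for_event_or_timeout() logic repeated over one descriptor per vCPU channel plus one for the ring. Per handle, the pattern it applies boils down to the sketch below (my own condensation, error handling trimmed, not code from the patch):

    /* Sketch: wait for and acknowledge one event on a single xenevtchn handle. */
    /* Needs <poll.h> and <xenevtchn.h>. */
    static int wait_one(xenevtchn_handle *xce, unsigned long ms)
    {
        struct pollfd fd = { .fd = xenevtchn_fd(xce), .events = POLLIN | POLLERR };
        int port, rc = poll(&fd, 1, ms);

        if ( rc <= 0 )
            return rc;                     /* timeout, or -1 with errno set */

        port = xenevtchn_pending(xce);     /* which local port fired */
        if ( port < 0 )
            return -1;

        if ( xenevtchn_unmask(xce, port) ) /* re-arm delivery on that port */
            return -1;

        return port;
    }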
> +
> +static int xenaccess_evtchn_bind_port(uint32_t evtchn_port,
> + domid_t domain_id,
> + xenevtchn_handle **_handle,
> + int *_port)
> +{
> + xenevtchn_handle *handle;
> + int rc;
> +
> + if ( !_handle || !_port )
> + return -EINVAL;
> +
> + /* Open event channel */
> + handle = xenevtchn_open(NULL, 0);
> + if ( handle == NULL )
> + {
> + ERROR("Failed to open event channel\n");
> + return -ENODEV;
> + }
> +
> + /* Bind event notification */
> + rc = xenevtchn_bind_interdomain(handle, domain_id, evtchn_port);
> + if ( rc < 0 )
> + {
> + ERROR("Failed to bind event channel\n");
> + xenevtchn_close(handle);
> + return rc;
> + }
> +
> + *_handle = handle;
> + *_port = rc;
> + return 0;
> +}
> +
> +static void xenaccess_evtchn_unbind_port(uint32_t evtchn_port,
> + xenevtchn_handle **_handle,
> + int *_port)
> +{
> + if ( !_handle || !*_handle || !_port )
> + return;
> +
> + xenevtchn_unbind(*_handle, *_port);
> + xenevtchn_close(*_handle);
> + *_handle = NULL;
> + *_port = 0;
> +}
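The bind/unbind helpers pair up nicely; as I read it, callers use them symmetrically, roughly like this (illustrative only, remote_port and domain_id are placeholder names of mine):

    xenevtchn_handle *xce = NULL;
    int local_port = 0;

    /* Bind to the port advertised by the hypervisor for this ring/vCPU. */
    if ( xenaccess_evtchn_bind_port(remote_port, domain_id, &xce, &local_port) < 0 )
        return -1;

    /* ... poll on xenevtchn_fd(xce) and consume events ... */

    /* Tear down: unbinds, closes the handle and clears xce/local_port. */
    xenaccess_evtchn_unbind_port(remote_port, &xce, &local_port);

One small thing I noticed while writing that out: the evtchn_port argument of xenaccess_evtchn_unbind_port() doesn't seem to be used inside the function.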
> +
> +static int xenaccess_evtchn_bind(xenaccess_t *xenaccess)
> +{
> + int rc, i = 0;
> +
> + rc = xenaccess_evtchn_bind_port(xenaccess->vm_event.ring->evtchn_port,
> + xenaccess->vm_event.domain_id,
> + &xenaccess->vm_event.ring->xce_handle,
> + &xenaccess->vm_event.ring->port);
> + if ( rc < 0 )
> + {
> + ERROR("Failed to bind ring events\n");
> + return rc;
> + }
> +
> + if ( xenaccess->vm_event.channel == NULL)
> + return 0;
> +
> + for ( i = 0; i < xenaccess->vm_event.num_vcpus; i++ )
> + {
> + rc = xenaccess_evtchn_bind_port(xenaccess->vm_event.channel->evtchn_ports[i],
> + xenaccess->vm_event.domain_id,
> + &xenaccess->vm_event.channel->xce_handles[i],
> + &xenaccess->vm_event.channel->ports[i]);
> + if ( rc < 0 )
> {
> - ERROR("Failed to unmask event channel port");
> + ERROR("Failed to bind channel events\n");
> goto err;
> }
> }
> - else
> - port = -1;
>
> - return port;
> + evtchn_bind = true;
> + return 0;
>
> - err:
> - return -errno;
> +err:
> + xenaccess_evtchn_unbind_port(xenaccess->vm_event.ring->evtchn_port,
> + &xenaccess->vm_event.ring->xce_handle,
> + &xenaccess->vm_event.ring->port);
> +
> + for ( i--; i >= 0; i-- )
This for() looks peculiar.
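Assuming its body mirrors the bind loop above, i.e. it unbinds the channel ports that were successfully bound before the failing index, a plain while would read more naturally to me, something along these lines (untested sketch):

    /* Unwind only the channel ports bound before the failure. */
    while ( i-- )
        xenaccess_evtchn_unbind_port(xenaccess->vm_event.channel->evtchn_ports[i],
                                     &xenaccess->vm_event.channel->xce_handles[i],
                                     &xenaccess->vm_event.channel->ports[i]);

with the ring port still unbound first, as the err path above already does.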
Thanks,
Razvan
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel