Re: [Xen-devel] [PATCH 08/17 v5] xen/arm: vpl011: Add a new domctl API to initialize vpl011
On Thu, 22 Jun 2017, Bhupinder Thakur wrote:
> Add a new domctl API to initialize vpl011. It takes the GFN and console
> backend domid as input and returns an event channel to be used for
> sending events to and receiving events from Xen.
>
> Xen will communicate with xenconsole using the GFN as the ring buffer and
> the event channel to transmit and receive pl011 data on the guest domain's
> behalf.
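To make the intended flow concrete, a hypothetical caller of the new libxc
wrapper would look roughly like this (guest_domid, console_domid and
ring_gfn are placeholders for values produced by the domain build, not
names from this patch; error handling is elided):

    #include <xenctrl.h>

    static int setup_vuart(xc_interface *xch, uint32_t guest_domid,
                           uint32_t console_domid, xen_pfn_t ring_gfn)
    {
        evtchn_port_t port;
        int rc;

        /* Ask Xen to set up vpl011 emulation for the guest: the ring
         * page lives at ring_gfn and console_domid is the domain
         * running the console backend. */
        rc = xc_dom_vpl011_init(xch, guest_domid, console_domid,
                                ring_gfn, &port);
        if ( rc < 0 )
            return rc;

        /* On success, port is the event channel the console backend
         * (xenconsoled) binds to for ring notifications. */
        return 0;
    }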
>
> Signed-off-by: Bhupinder Thakur <bhupinder.thakur@xxxxxxxxxx>
> ---
> CC: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> CC: Wei Liu <wei.liu2@xxxxxxxxxx>
> CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> CC: Julien Grall <julien.grall@xxxxxxx>
>
> Changes since v4:
> - Removed libxl__arch_domain_create_finish().
> - Added a new function libxl__arch_build_dom_finish(), which is called last
>   in libxl__build_dom(). This function now calls the vpl011 initialization
>   function.
>
> Changes since v3:
> - Added a new arch-specific function libxl__arch_domain_create_finish(), which
>   calls the vpl011 initialization function. For x86 this function does not do
>   anything.
> - domain_vpl011_init() takes a pointer to a structure which contains all the
>   required information, such as console_domid and gfn, instead of passing the
>   parameters separately.
> - Dropped the DOMCTL API defined for de-initializing vpl011, as that should be
>   taken care of when the domain is destroyed (and not depend on userspace
>   libraries/applications).
>
> Changes since v2:
> - Replaced the DOMCTL APIs defined for get/set of event channel and GFN with
> a set of DOMCTL APIs for initializing and de-initializing vpl011 emulation.
>
> tools/libxc/include/xenctrl.h | 20 ++++++++++++++++++++
> tools/libxc/xc_domain.c | 25 +++++++++++++++++++++++++
> tools/libxl/libxl_arch.h | 6 ++++++
> tools/libxl/libxl_arm.c | 22 ++++++++++++++++++++++
> tools/libxl/libxl_dom.c | 4 ++++
> tools/libxl/libxl_x86.c | 8 ++++++++
> xen/arch/arm/domain.c | 5 +++++
> xen/arch/arm/domctl.c | 37 +++++++++++++++++++++++++++++++++++++
> xen/include/public/domctl.h | 12 ++++++++++++
> 9 files changed, 139 insertions(+)
>
> diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
> index 1629f41..26f3d1e 100644
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -885,6 +885,26 @@ int xc_vcpu_getcontext(xc_interface *xch,
>                        uint32_t vcpu,
>                        vcpu_guest_context_any_t *ctxt);
>
> +#if defined (__arm__) || defined(__aarch64__)
> +/**
> + * This function initializes the vpl011 emulation and returns
> + * the event to be used by the backend for communicating with
> + * the emulation code.
> + *
> + * @parm xch a handle to an open hypervisor interface
> + * @parm domid the domain to get information from
> + * @parm console_domid the domid of the backend console
> + * @parm gfn the guest pfn to be used as the ring buffer
> + * @parm evtchn the event channel to be used for events
> + * @return 0 on success, negative error on failure
> + */
> +int xc_dom_vpl011_init(xc_interface *xch,
> +                       uint32_t domid,
> +                       uint32_t console_domid,
> +                       xen_pfn_t gfn,
> +                       evtchn_port_t *evtchn);
> +#endif
Actually, the pattern is to define the xc_ function on all architectures
and only return ENOSYS where it is not implemented; see
xc_vcpu_get_extstate.
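Something along these lines (a sketch only; the ARM branch reuses the
domctl body from the hunk below, and the exact error convention should
follow xc_vcpu_get_extstate):

    int xc_dom_vpl011_init(xc_interface *xch,
                           uint32_t domid,
                           uint32_t console_domid,
                           xen_pfn_t gfn,
                           evtchn_port_t *evtchn)
    {
    #if !defined(__arm__) && !defined(__aarch64__)
        /* Not implemented on this architecture. */
        return -ENOSYS;
    #else
        DECLARE_DOMCTL;
        int rc;

        domctl.cmd = XEN_DOMCTL_vuart_op;
        domctl.domain = (domid_t)domid;
        domctl.u.vuart_op.cmd = XEN_DOMCTL_VUART_OP_INIT_VPL011;
        domctl.u.vuart_op.console_domid = console_domid;
        domctl.u.vuart_op.gfn = gfn;

        rc = do_domctl(xch, &domctl);
        if ( !rc )
            *evtchn = domctl.u.vuart_op.evtchn;

        return rc;
    #endif
    }

That keeps the declaration in xenctrl.h unconditional, so callers do not
need #ifdefs of their own.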
> /**
> * This function returns information about the XSAVE state of a particular
> * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
> index 5d192ea..55de408 100644
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -343,6 +343,31 @@ int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
>      return 0;
>  }
>
> +#if defined (__arm__) || defined(__aarch64__)
> +int xc_dom_vpl011_init(xc_interface *xch,
> +                       uint32_t domid,
> +                       uint32_t console_domid,
> +                       xen_pfn_t gfn,
> +                       evtchn_port_t *evtchn)
> +{
See other comment.
> +    DECLARE_DOMCTL;
> +    int rc = 0;
> +
> +    domctl.cmd = XEN_DOMCTL_vuart_op;
> +    domctl.domain = (domid_t)domid;
> +    domctl.u.vuart_op.cmd = XEN_DOMCTL_VUART_OP_INIT_VPL011;
> +    domctl.u.vuart_op.console_domid = console_domid;
> +    domctl.u.vuart_op.gfn = gfn;
> +
> +    if ( (rc = do_domctl(xch, &domctl)) < 0 )
> +        return rc;
> +
> +    *evtchn = domctl.u.vuart_op.evtchn;
> +
> +    return rc;
> +}
> +#endif
> +
> int xc_domain_getinfo(xc_interface *xch,
>                        uint32_t first_domid,
>                        unsigned int max_doms,
> diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
> index 5e1fc60..118b92c 100644
> --- a/tools/libxl/libxl_arch.h
> +++ b/tools/libxl/libxl_arch.h
> @@ -44,6 +44,12 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
>                                                 libxl_domain_build_info *info,
>                                                 struct xc_dom_image *dom);
>
> +/* perform any pending hardware initialization */
> +int libxl__arch_build_dom_finish(libxl__gc *gc,
> +                                 libxl_domain_build_info *info,
> +                                 struct xc_dom_image *dom,
> +                                 libxl__domain_build_state *state);
> +
> /* build vNUMA vmemrange with arch specific information */
> _hidden
> int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
> diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
> index d842d88..9d6448e 100644
> --- a/tools/libxl/libxl_arm.c
> +++ b/tools/libxl/libxl_arm.c
> @@ -1038,6 +1038,28 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
>      return 0;
>  }
>
> +int libxl__arch_build_dom_finish(libxl__gc *gc,
> +                                 libxl_domain_build_info *info,
> +                                 struct xc_dom_image *dom,
> +                                 libxl__domain_build_state *state)
> +{
> +    int ret = 0;
> +
> +    if ( info->arch_arm.vuart ) {
> +        ret = xc_dom_vpl011_init(CTX->xch,
> +                                 dom->guest_domid,
> +                                 dom->console_domid,
> +                                 dom->vuart_gfn,
> +                                 &state->vuart_port);
> +        if ( ret < 0 )
> +        {
> +            LOG(ERROR, "xc_dom_vpl011_init failed\n");
> +        }
> +    }
> +
> +    return ret;
> +}
> +
> int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
>                                       uint32_t domid,
>                                       libxl_domain_build_info *info,
> diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
> index c98af60..9b42205 100644
> --- a/tools/libxl/libxl_dom.c
> +++ b/tools/libxl/libxl_dom.c
> @@ -687,6 +687,10 @@ static int libxl__build_dom(libxl__gc *gc, uint32_t domid,
>          LOGE(ERROR, "xc_dom_gnttab_init failed");
>          goto out;
>      }
> +    if ( (ret = libxl__arch_build_dom_finish(gc, info, dom, state)) != 0 ) {
> +        LOGE(ERROR, "libxl__arch_build_dom_finish failed");
> +        goto out;
> +    }
>
> out:
> return ret != 0 ? ERROR_FAIL : 0;
> diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
> index 455f6f0..0aaeded 100644
> --- a/tools/libxl/libxl_x86.c
> +++ b/tools/libxl/libxl_x86.c
> @@ -391,6 +391,14 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
>      return rc;
>  }
>
> +int libxl__arch_build_dom_finish(libxl__gc *gc,
> +                                 libxl_domain_build_info *info,
> +                                 struct xc_dom_image *dom,
> +                                 libxl__domain_build_state *state)
> +{
> +    return 0;
> +}
> +
> /* Return 0 on success, ERROR_* on failure. */
> int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
>                                       uint32_t domid,
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 76310ed..f1b24cc 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -665,6 +665,11 @@ fail:
>
> void arch_domain_destroy(struct domain *d)
> {
> +    /*
> +     * vpl011 is initialized via a DOMCTL call XEN_DOMCTL_vuart_op.
> +     */
> +    domain_vpl011_deinit(d);
> +
>      /* IOMMU page table is shared with P2M, always call
>       * iommu_domain_destroy() before p2m_teardown().
>       */
> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
> index 86fa102..280c412 100644
> --- a/xen/arch/arm/domctl.c
> +++ b/xen/arch/arm/domctl.c
> @@ -6,9 +6,11 @@
>
> #include <xen/types.h>
> #include <xen/errno.h>
> +#include <xen/guest_access.h>
> #include <xen/hypercall.h>
> #include <xen/iocap.h>
> #include <xen/lib.h>
> +#include <xen/mm.h>
> #include <xen/sched.h>
> #include <xsm/xsm.h>
> #include <public/domctl.h>
> @@ -119,6 +121,41 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>          d->disable_migrate = domctl->u.disable_migrate.disable;
>          return 0;
>
> +    case XEN_DOMCTL_vuart_op:
> +    {
> +        int rc;
> +        struct xen_domctl_vuart_op *vuart_op = &domctl->u.vuart_op;
> +
> +        switch ( vuart_op->cmd )
> +        {
> +        case XEN_DOMCTL_VUART_OP_INIT_VPL011:
> +
> +            if ( !d->creation_finished )
> +            {
> +                struct vpl011_init_info info;
> +
> +                info.console_domid = vuart_op->console_domid;
> +                info.gfn = _gfn(vuart_op->gfn);
> +
> +                rc = domain_vpl011_init(d, &info);
> +                if ( !rc )
> +                {
> +                    vuart_op->evtchn = info.evtchn;
> +                    rc = __copy_to_guest(u_domctl, domctl, 1);
> +                }
> +            }
> +            else
> +                rc = -EPERM;
> +
> +            break;
> +
> +        default:
> +            rc = -EINVAL;
> +            break;
> +        }
> +
> +        return rc;
> +    }
>      default:
>      {
>          int rc;
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index f7cbc0a..0da8aa6 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -36,6 +36,7 @@
> #include "grant_table.h"
> #include "hvm/save.h"
> #include "memory.h"
> +#include "event_channel.h"
>
> #define XEN_DOMCTL_INTERFACE_VERSION 0x0000000e
>
> @@ -1138,6 +1139,15 @@ struct xen_domctl_psr_cat_op {
>      uint32_t target;    /* IN */
>      uint64_t data;      /* IN/OUT */
>  };
> +
> +struct xen_domctl_vuart_op {
> +#define XEN_DOMCTL_VUART_OP_INIT_VPL011  0
> +    uint32_t cmd;           /* XEN_DOMCTL_VUART_OP_* */
> +    uint32_t console_domid; /* IN */
> +    xen_pfn_t gfn;          /* IN */
> +    evtchn_port_t evtchn;   /* OUT */
> +};
> +
> typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
> DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
>
> @@ -1218,6 +1228,7 @@ struct xen_domctl {
> #define XEN_DOMCTL_monitor_op                    77
> #define XEN_DOMCTL_psr_cat_op                    78
> #define XEN_DOMCTL_soft_reset                    79
> +#define XEN_DOMCTL_vuart_op                      80
> #define XEN_DOMCTL_gdbsx_guestmemio              1000
> #define XEN_DOMCTL_gdbsx_pausevcpu               1001
> #define XEN_DOMCTL_gdbsx_unpausevcpu             1002
> @@ -1280,6 +1291,7 @@ struct xen_domctl {
>          struct xen_domctl_psr_cmt_op        psr_cmt_op;
>          struct xen_domctl_monitor_op        monitor_op;
>          struct xen_domctl_psr_cat_op        psr_cat_op;
> +        struct xen_domctl_vuart_op          vuart_op;
>          uint8_t                             pad[128];
>      } u;
>  };
> --
> 2.7.4
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel