[Xen-devel] [PATCH 02/10 v2] xen/arm: vpl011: Add new vuart domctl interface to setup pfn and evtchn
Add two new domctl APIs to:

- Allocate a new event channel for sending/receiving events to/from Xen.
- Map the PFN allocated by the toolstack to be used as the IN/OUT ring
  buffers.

Xen will communicate with xenconsole over the ring buffer and the event
channel to transmit and receive pl011 data on the guest domain's behalf.

Signed-off-by: Bhupinder Thakur <bhupinder.thakur@xxxxxxxxxx>
---
Changes since v1:
- Replaced the hvm interface with domctl interface

 tools/libxc/include/xenctrl.h | 26 ++++++++++++++++++++++++++
 tools/libxc/xc_domain.c       | 39 +++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/domctl.c         | 20 ++++++++++++++++++++
 xen/arch/x86/domctl.c         |  4 ++++
 xen/include/public/domctl.h   | 11 +++++++++++
 5 files changed, 100 insertions(+)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 1629f41..bebfe7d 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -886,6 +886,32 @@ int xc_vcpu_getcontext(xc_interface *xch,
                        vcpu_guest_context_any_t *ctxt);
 
 /**
+ * This function passes the pfn allocated by the toolstack to Xen, to be
+ * used as the IN/OUT ring buffer for the virtual uart console.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to set the vuart pfn for
+ * @parm vuart_pfn the pfn to be used for the ring buffer
+ * @return 0 on success, negative error on failure
+ */
+int xc_domain_vuart_set_pfn(xc_interface *xch,
+                            uint32_t domid,
+                            uint32_t vuart_pfn);
+
+/**
+ * This function returns the event channel to be used for setting up the
+ * virtual uart console.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm vuart_evtchn the event channel to be used for console events
+ * @return 0 on success, negative error on failure
+ */
+int xc_domain_vuart_get_evtchn(xc_interface *xch,
+                               uint32_t domid,
+                               uint32_t *vuart_evtchn);
+
+/**
  * This function returns information about the XSAVE state of a particular
  * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
  * the call is considered a query to retrieve them and the buffer is not
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 00909ad4..943f202 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -343,6 +343,45 @@ int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
     return 0;
 }
 
+int xc_domain_vuart_set_pfn(xc_interface *xch,
+                            uint32_t domid,
+                            uint32_t vuart_pfn)
+{
+    DECLARE_DOMCTL;
+    int rc = 0;
+
+    domctl.cmd = XEN_DOMCTL_vuart_op;
+    domctl.domain = (domid_t)domid;
+    domctl.u.vuart_op.cmd = XEN_DOMCTL_VUART_OP_SET_PFN;
+    domctl.u.vuart_op.pfn = vuart_pfn;
+
+    if ( (rc = do_domctl(xch, &domctl)) < 0 )
+        return rc;
+
+    return rc;
+}
+
+int xc_domain_vuart_get_evtchn(xc_interface *xch,
+                               uint32_t domid,
+                               uint32_t *vuart_evtchn)
+{
+    DECLARE_DOMCTL;
+    int rc = 0;
+
+    *vuart_evtchn = -1;
+
+    domctl.cmd = XEN_DOMCTL_vuart_op;
+    domctl.domain = (domid_t)domid;
+    domctl.u.vuart_op.cmd = XEN_DOMCTL_VUART_OP_GET_EVTCHN;
+
+    if ( (rc = do_domctl(xch, &domctl)) < 0 )
+        return rc;
+
+    *vuart_evtchn = domctl.u.vuart_op.evtchn;
+
+    return rc;
+}
+
 int xc_domain_getinfo(xc_interface *xch,
                       uint32_t first_domid,
                       unsigned int max_doms,
diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index 971caec..e400f87 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -10,6 +10,7 @@
 #include <xen/sched.h>
 #include <xen/hypercall.h>
 #include <xen/iocap.h>
+#include <xen/guest_access.h>
 #include <xsm/xsm.h>
 
 #include <public/domctl.h>
@@ -119,6 +120,25 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
         d->disable_migrate = domctl->u.disable_migrate.disable;
         return 0;
 
+    case XEN_DOMCTL_vuart_op:
+    {
+        int rc;
+        if ( d->arch.vpl011.initialized )
+        {
+            if ( domctl->u.vuart_op.cmd == XEN_DOMCTL_VUART_OP_SET_PFN )
+            {
+                rc = vpl011_map_guest_page(d, domctl->u.vuart_op.pfn);
+            }
+            else
+            {
+                domctl->u.vuart_op.evtchn = d->arch.vpl011.evtchn;
+                rc = __copy_to_guest(u_domctl, domctl, 1);
+            }
+            return rc;
+        }
+        else
+            return -EINVAL;
+    }
     default:
     {
         int rc;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index e104be2..49e907d 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1465,6 +1465,10 @@ long arch_do_domctl(
         recalculate_cpuid_policy(d);
         break;
 
+    case XEN_DOMCTL_vuart_op:
+        ret = -EOPNOTSUPP;
+        break;
+
     default:
         ret = iommu_do_domctl(domctl, d, u_domctl);
         break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index e6cf211..8bee0c3 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1138,6 +1138,15 @@ struct xen_domctl_psr_cat_op {
     uint32_t target;    /* IN */
     uint64_t data;      /* IN/OUT */
 };
+
+struct xen_domctl_vuart_op {
+#define XEN_DOMCTL_VUART_OP_SET_PFN    0
+#define XEN_DOMCTL_VUART_OP_GET_EVTCHN 1
+    uint32_t cmd;       /* XEN_DOMCTL_VUART_OP_* */
+    uint32_t pfn;       /* IN */
+    uint32_t evtchn;    /* OUT */
+};
+
 typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
 
@@ -1218,6 +1227,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_monitor_op                    77
 #define XEN_DOMCTL_psr_cat_op                    78
 #define XEN_DOMCTL_soft_reset                    79
+#define XEN_DOMCTL_vuart_op                      80
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1280,6 +1290,7 @@ struct xen_domctl {
         struct xen_domctl_psr_cmt_op        psr_cmt_op;
         struct xen_domctl_monitor_op        monitor_op;
         struct xen_domctl_psr_cat_op        psr_cat_op;
+        struct xen_domctl_vuart_op          vuart_op;
         uint8_t                             pad[128];
     } u;
 };
-- 
2.7.4
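
For context, a minimal usage sketch of the two new libxc calls, roughly as a
console client such as xenconsoled might drive them. This is illustrative
only: alloc_vuart_gfn() is a hypothetical placeholder for however the
toolstack chooses the guest pfn backing the ring buffers, the libxenevtchn
interdomain-bind interface is assumed to be available, and error handling is
abbreviated.

#include <stdio.h>
#include <stdint.h>
#include <xenctrl.h>
#include <xenevtchn.h>

/* Hypothetical helper: however the toolstack picks the guest pfn that
 * will back the vuart IN/OUT ring buffers. */
extern uint32_t alloc_vuart_gfn(uint32_t domid);

static int setup_vuart(uint32_t domid)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    xenevtchn_handle *xce = xenevtchn_open(NULL, 0);
    uint32_t pfn, evtchn;
    int rc = -1, local_port;

    if ( !xch || !xce )
        goto out;

    /* Tell Xen which guest pfn to map for the vuart ring buffers. */
    pfn = alloc_vuart_gfn(domid);
    rc = xc_domain_vuart_set_pfn(xch, domid, pfn);
    if ( rc < 0 )
        goto out;

    /* Ask Xen which event channel it uses for vuart notifications. */
    rc = xc_domain_vuart_get_evtchn(xch, domid, &evtchn);
    if ( rc < 0 )
        goto out;

    /* Bind to it so the console client is woken when pl011 data arrives. */
    local_port = xenevtchn_bind_interdomain(xce, domid, evtchn);
    if ( local_port < 0 )
    {
        rc = local_port;
        goto out;
    }

    printf("vuart: ring pfn %#x, evtchn %u bound to local port %d\n",
           pfn, evtchn, local_port);
    rc = 0;

 out:
    if ( xce )
        xenevtchn_close(xce);
    if ( xch )
        xc_interface_close(xch);
    return rc;
}

Note that, going by the arm domctl handler above, XEN_DOMCTL_VUART_OP_GET_EVTCHN
only reports the port already recorded in d->arch.vpl011.evtchn (the op fails
with -EINVAL until vpl011 is initialised), so the sketch assumes the event
channel is allocated as part of vpl011 initialisation elsewhere in the series.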