[Xen-devel] [PATCH v5 3/6] Qemu-Xen-vTPM: Xen frontend driver infrastructure
This patch adds infrastructure for Xen frontend drivers living in QEMU, so
that drivers do not need to implement the common functionality on their own.
It is mostly xenbus management: functions to access XenStore, setting up
XenStore watches, callbacks on device discovery and state changes, and
handling of the event channel between the virtual machines.

Call xen_fe_register() to register a XenDevOps, and make sure the
corresponding frontend nodes exist in XenStore, e.g.:

[...]
3 = ""
 [...]
 device = ""        (frontend devices; the backends run in QEMU, etc.)
  vkbd = ""
   [...]
  vif = ""
   [...]

In QEMU, xen_vtpmdev_ops is initialized through the following call chain:

xen_hvm_init()
 [...]
 -->xen_fe_register("vtpm", ...)
 -->xenstore_fe_scan()
 -->xen_fe_try_init(ops)
    --> XenDevOps.init()
 -->xen_fe_get_xendev()
    --> XenDevOps.alloc()
 -->xen_fe_check()
 -->xen_fe_try_initialise()
    --> XenDevOps.initialise()
 -->xen_fe_try_connected()
    --> XenDevOps.connected()
 -->xs_watch()
 [...]

Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
--
Changes in v5:
 - Comment enhancements.
 - Pass the size of the buffer to vtpm_recv()/vtpm_send().
---
 hw/tpm/Makefile.objs         |   1 +
 hw/tpm/xen_vtpm_frontend.c   | 321 +++++++++++++++++++++++++++++++++++++++++++
 hw/xen/xen_frontend.c        |  20 +++
 include/hw/xen/xen_backend.h |   7 +
 include/hw/xen/xen_common.h  |   6 +
 xen-hvm.c                    |   5 +
 6 files changed, 360 insertions(+)
 create mode 100644 hw/tpm/xen_vtpm_frontend.c
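For orientation, here is a minimal sketch (not part of the patch) of what a
frontend driver built on this infrastructure could look like. Everything
named "vfoo" is made up for illustration; the XenDevOps hooks and their
signatures are taken from xen_vtpmdev_ops in the diff below.

/* Sketch only: a hypothetical "vfoo" frontend driver. */
#include "hw/xen/xen_backend.h"

struct xen_vfoo_dev {
    struct XenDevice xendev;    /* must be first, as in xen_vtpm_dev */
    int bedomid;                /* backend domain id */
};

/* Decide whether the device should exist at all; return -1 to skip it,
 * as vtpm_init() does when the stubdom vTPM is not enabled. */
static int vfoo_init(struct XenDevice *xendev)
{
    return 0;
}

/* Allocate per-device resources: grant handles, bottom halves, ... */
static void vfoo_alloc(struct XenDevice *xendev)
{
}

/* Set up the shared ring and event channel and publish them in the
 * frontend's XenStore directory (cf. vtpm_initialise()). */
static int vfoo_initialise(struct XenDevice *xendev)
{
    return 0;
}

/* Called when the backend notifies us over the event channel. */
static void vfoo_event(struct XenDevice *xendev)
{
}

struct XenDevOps xen_vfoodev_ops = {
    .size       = sizeof(struct xen_vfoo_dev),
    .flags      = DEVOPS_FLAG_FE,
    .init       = vfoo_init,
    .alloc      = vfoo_alloc,
    .initialise = vfoo_initialise,
    .event      = vfoo_event,
};

Such a driver would be registered once at start-up, e.g.
xen_fe_register("vfoo", &xen_vfoodev_ops), just as the xen-hvm.c hunk below
registers "vtpm".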
diff --git a/hw/tpm/Makefile.objs b/hw/tpm/Makefile.objs
index 99f5983..57919fa 100644
--- a/hw/tpm/Makefile.objs
+++ b/hw/tpm/Makefile.objs
@@ -1,2 +1,3 @@
 common-obj-$(CONFIG_TPM_TIS) += tpm_tis.o
 common-obj-$(CONFIG_TPM_PASSTHROUGH) += tpm_passthrough.o
+common-obj-$(CONFIG_TPM_XENSTUBDOMS) += xen_vtpm_frontend.o
diff --git a/hw/tpm/xen_vtpm_frontend.c b/hw/tpm/xen_vtpm_frontend.c
new file mode 100644
index 0000000..3ca9501
--- /dev/null
+++ b/hw/tpm/xen_vtpm_frontend.c
@@ -0,0 +1,321 @@
+/*
+ * Connect to Xen vTPM stubdom domain
+ *
+ * Copyright (c) 2015 Intel Corporation
+ * Authors:
+ *    Quan Xu <quan.xu@xxxxxxxxx>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include <signal.h>
+#include <inttypes.h>
+#include <time.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/uio.h>
+
+#include "hw/hw.h"
+#include "block/aio.h"
+#include "hw/xen/xen_backend.h"
+
+#ifndef XS_STUBDOM_VTPM_ENABLE
+#define XS_STUBDOM_VTPM_ENABLE "1"
+#endif
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+enum tpmif_state {
+    /* No contents, vTPM idle, cancel complete */
+    TPMIF_STATE_IDLE,
+    /* Request ready or vTPM working */
+    TPMIF_STATE_SUBMIT,
+    /* Response ready or vTPM idle */
+    TPMIF_STATE_FINISH,
+    /* Cancel requested or vTPM working */
+    TPMIF_STATE_CANCEL,
+};
+
+static AioContext *vtpm_aio_ctx;
+
+enum status_bits {
+    VTPM_STATUS_RUNNING  = 0x1,
+    VTPM_STATUS_IDLE     = 0x2,
+    VTPM_STATUS_RESULT   = 0x4,
+    VTPM_STATUS_CANCELED = 0x8,
+};
+
+struct tpmif_shared_page {
+    /* Request and response length in bytes */
+    uint32_t length;
+    /* Enum tpmif_state */
+    uint8_t state;
+    /* For the current request */
+    uint8_t locality;
+    /* Should be zero */
+    uint8_t pad;
+    /* Extra pages for long packets; may be zero */
+    uint8_t nr_extra_pages;
+    /*
+     * Grant IDs; the length is actually nr_extra_pages.
+     * Beyond the extra_pages entries is the actual request
+     * and response.
+     */
+    uint32_t extra_pages[0];
+};
+
+struct xen_vtpm_dev {
+    struct XenDevice xendev;  /* must be first */
+    struct tpmif_shared_page *shr;
+    xc_gntshr *xen_xcs;
+    int ring_ref;
+    int bedomid;
+    QEMUBH *sr_bh;
+};
+
+static uint8_t vtpm_status(struct xen_vtpm_dev *vtpmdev)
+{
+    switch (vtpmdev->shr->state) {
+    case TPMIF_STATE_IDLE:
+    case TPMIF_STATE_FINISH:
+        return VTPM_STATUS_IDLE;
+    case TPMIF_STATE_SUBMIT:
+    case TPMIF_STATE_CANCEL:
+        return VTPM_STATUS_RUNNING;
+    default:
+        return 0;
+    }
+}
+
+static bool vtpm_aio_wait(AioContext *ctx)
+{
+    return aio_poll(ctx, true);
+}
+
+static void sr_bh_handler(void *opaque)
+{
+}
+
+int vtpm_recv(struct XenDevice *xendev, uint8_t *buf, uint32_t buf_size,
+              size_t *count)
+{
+    struct xen_vtpm_dev *vtpmdev = container_of(xendev, struct xen_vtpm_dev,
+                                                xendev);
+    struct tpmif_shared_page *shr = vtpmdev->shr;
+    unsigned int offset;
+    size_t length = shr->length;
+
+    if (shr->state == TPMIF_STATE_IDLE) {
+        return -ECANCELED;
+    }
+
+    offset = sizeof(*shr) + sizeof(shr->extra_pages[0]) * shr->nr_extra_pages;
+    if (offset > buf_size) {
+        return -EIO;
+    }
+
+    if (offset + length > buf_size) {
+        length = buf_size - offset;
+    }
+
+    if (length > *count) {
+        length = *count;
+    }
+
+    memcpy(buf, offset + (uint8_t *)shr, length);
+    *count = length;
+
+    return 0;
+}
+
+int vtpm_send(struct XenDevice *xendev, uint8_t *buf, uint32_t buf_size,
+              size_t count)
+{
+    struct xen_vtpm_dev *vtpmdev = container_of(xendev, struct xen_vtpm_dev,
+                                                xendev);
+    struct tpmif_shared_page *shr = vtpmdev->shr;
+    unsigned int offset = sizeof(*shr) +
+                          sizeof(shr->extra_pages[0]) * shr->nr_extra_pages;
+
+    if (offset > buf_size) {
+        return -EIO;
+    }
+
+    if (offset + count > buf_size) {
+        return -ECANCELED;
+    }
+
+    while (vtpm_status(vtpmdev) != VTPM_STATUS_IDLE) {
+        vtpm_aio_wait(vtpm_aio_ctx);
+    }
+
+    memcpy(offset + (uint8_t *)shr, buf, count);
+    shr->length = count;
+    barrier();
+    shr->state = TPMIF_STATE_SUBMIT;
+    xen_wmb();
+    xen_be_send_notify(&vtpmdev->xendev);
+
+    while (vtpm_status(vtpmdev) != VTPM_STATUS_IDLE) {
+        vtpm_aio_wait(vtpm_aio_ctx);
+    }
+
+    return count;
+}
+
+static int vtpm_initialise(struct XenDevice *xendev)
+{
+    struct xen_vtpm_dev *vtpmdev = container_of(xendev, struct xen_vtpm_dev,
+                                                xendev);
+    xs_transaction_t xbt = XBT_NULL;
+    unsigned int ring_ref;
+
+    vtpmdev->xendev.fe = xenstore_read_be_str(&vtpmdev->xendev, "frontend");
+    if (vtpmdev->xendev.fe == NULL) {
+        return -1;
+    }
+
+    /* Get backend domid */
+    if (xenstore_read_fe_int(&vtpmdev->xendev, "backend-id",
+                             &vtpmdev->bedomid)) {
+        return -1;
+    }
+
+    /* Alloc shared page */
+    vtpmdev->shr = xc_gntshr_share_pages(vtpmdev->xen_xcs, vtpmdev->bedomid, 1,
+                                         &ring_ref, PROT_READ|PROT_WRITE);
+    vtpmdev->ring_ref = ring_ref;
+    if (vtpmdev->shr == NULL) {
+        return -1;
+    }
+
+    /* Create event channel */
+    if (xen_fe_alloc_unbound(&vtpmdev->xendev, 0, vtpmdev->bedomid)) {
+        xc_gntshr_munmap(vtpmdev->xen_xcs, vtpmdev->shr, 1);
+        return -1;
+    }
+
+    xc_evtchn_unmask(vtpmdev->xendev.evtchndev,
+                     vtpmdev->xendev.local_port);
+
+again:
+    xbt = xs_transaction_start(xenstore);
+    if (xbt == XBT_NULL) {
+        goto abort_transaction;
+    }
+
+    if (xenstore_write_int(vtpmdev->xendev.fe, "ring-ref",
+                           vtpmdev->ring_ref)) {
+        goto abort_transaction;
+    }
+
+    if (xenstore_write_int(vtpmdev->xendev.fe, "event-channel",
+                           vtpmdev->xendev.local_port)) {
+        goto abort_transaction;
+    }
+
+    /* Publish protocol v2 feature */
+    if (xenstore_write_int(vtpmdev->xendev.fe, "feature-protocol-v2", 1)) {
+        goto abort_transaction;
+    }
+
+    if (!xs_transaction_end(xenstore, xbt, 0)) {
+        if (errno == EAGAIN) {
+            goto again;
+        }
+    }
+
+    return 0;
+
+abort_transaction:
+    xc_gntshr_munmap(vtpmdev->xen_xcs, vtpmdev->shr, 1);
+    xs_transaction_end(xenstore, xbt, 1);
+    return -1;
+}
+
+static int vtpm_free(struct XenDevice *xendev)
+{
+    struct xen_vtpm_dev *vtpmdev = container_of(xendev, struct xen_vtpm_dev,
+                                                xendev);
+
+    aio_poll(vtpm_aio_ctx, false);
+    qemu_bh_delete(vtpmdev->sr_bh);
+    if (vtpmdev->shr) {
+        xc_gntshr_munmap(vtpmdev->xen_xcs, vtpmdev->shr, 1);
+    }
+    xc_interface_close(vtpmdev->xen_xcs);
+    return 0;
+}
+
+static int vtpm_init(struct XenDevice *xendev)
+{
+    char path[XEN_BUFSIZE];
+    char *value;
+    unsigned int stubdom_vtpm = 0;
+
+    snprintf(path, sizeof(path), "/local/domain/%d/platform/acpi_stubdom_vtpm",
+             xen_domid);
+    value = xs_read(xenstore, 0, path, &stubdom_vtpm);
+    if (stubdom_vtpm <= 0 || strcmp(value, XS_STUBDOM_VTPM_ENABLE)) {
+        free(value);
+        return -1;
+    }
+    free(value);
+    return 0;
+}
+
+static void vtpm_alloc(struct XenDevice *xendev)
+{
+    struct xen_vtpm_dev *vtpmdev = container_of(xendev, struct xen_vtpm_dev,
+                                                xendev);
+
+    vtpm_aio_ctx = aio_context_new(NULL);
+    if (vtpm_aio_ctx == NULL) {
+        return;
+    }
+    vtpmdev->sr_bh = aio_bh_new(vtpm_aio_ctx, sr_bh_handler, vtpmdev);
+    qemu_bh_schedule(vtpmdev->sr_bh);
+    vtpmdev->xen_xcs = xen_xc_gntshr_open(0, 0);
+}
+
+static void vtpm_event(struct XenDevice *xendev)
+{
+    struct xen_vtpm_dev *vtpmdev = container_of(xendev, struct xen_vtpm_dev,
+                                                xendev);
+
+    qemu_bh_schedule(vtpmdev->sr_bh);
+}
+
+struct XenDevOps xen_vtpmdev_ops = {
+    .size = sizeof(struct xen_vtpm_dev),
+    .flags = DEVOPS_FLAG_IGNORE_STATE |
+             DEVOPS_FLAG_FE,
+    .event = vtpm_event,
+    .free = vtpm_free,
+    .init = vtpm_init,
+    .alloc = vtpm_alloc,
+    .initialise = vtpm_initialise,
+    .backend_changed = vtpm_backend_changed,
+};
diff --git a/hw/xen/xen_frontend.c b/hw/xen/xen_frontend.c
index 55af45a..1ca7342
--- a/hw/xen/xen_frontend.c
+++ b/hw/xen/xen_frontend.c
@@ -60,6 +60,26 @@ static void xen_fe_evtchn_event(void *opaque)
 
 /* ------------------------------------------------------------- */
 
+void vtpm_backend_changed(struct XenDevice *xendev, const char *node)
+{
+    int be_state;
+
+    if (strcmp(node, "state") == 0) {
+        xenstore_read_be_int(xendev, node, &be_state);
+        switch (be_state) {
+        case XenbusStateConnected:
+            /* TODO */
+            break;
+        case XenbusStateClosing:
+        case XenbusStateClosed:
+            xenbus_switch_state(xendev, XenbusStateClosing);
+            break;
+        default:
+            break;
+        }
+    }
+}
+
 int xen_fe_alloc_unbound(struct XenDevice *xendev, int dom, int remote_dom)
 {
     xendev->local_port = xc_evtchn_bind_unbound_port(xendev->evtchndev,
diff --git a/include/hw/xen/xen_backend.h b/include/hw/xen/xen_backend.h
index bb0b303..1fcced0 100644
--- a/include/hw/xen/xen_backend.h
+++ b/include/hw/xen/xen_backend.h
@@ -110,6 +110,13 @@ int xen_fe_register(const char *type, struct XenDevOps *ops);
 int xen_fe_alloc_unbound(struct XenDevice *xendev, int dom, int remote_dom);
 int xenbus_switch_state(struct XenDevice *xendev, enum xenbus_state xbus);
 
+/* Xen vtpm */
+int vtpm_send(struct XenDevice *xendev, uint8_t *buf, uint32_t buf_size,
+              size_t count);
+int vtpm_recv(struct XenDevice *xendev, uint8_t *buf, uint32_t buf_size,
+              size_t *count);
+void vtpm_backend_changed(struct XenDevice *xendev, const char *node);
+
 /* actual backend drivers */
 extern struct XenDevOps xen_console_ops;      /* xen_console.c */
 extern struct XenDevOps xen_kbdmouse_ops;     /* xen_framebuffer.c */
diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h
index 38f29fb..7268d0c 100644
--- a/include/hw/xen/xen_common.h
+++ b/include/hw/xen/xen_common.h
@@ -132,6 +132,12 @@ static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger,
     return xc_interface_open(logger, dombuild_logger, open_flags);
 }
 
+static inline xc_gntshr *xen_xc_gntshr_open(void *logger,
+                                            unsigned int open_flags)
+{
+    return xc_gntshr_open(logger, open_flags);
+}
+
 /* FIXME There is now way to have the xen fd */
 static inline int xc_fd(xc_interface *xen_xc)
 {
diff --git a/xen-hvm.c b/xen-hvm.c
index 315864c..b2403dc 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -1308,6 +1308,11 @@ int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
         fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
         return -1;
     }
+
+#ifdef CONFIG_TPM_XENSTUBDOMS
+    xen_fe_register("vtpm", &xen_vtpmdev_ops);
+#endif
+
     xen_be_register("console", &xen_console_ops);
     xen_be_register("vkbd", &xen_kbdmouse_ops);
     xen_be_register("qdisk", &xen_blkdev_ops);
--
1.8.3.2
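For reference, the libxc grant-sharing calls used in vtpm_initialise() and
vtpm_free() above follow a simple open/share/unmap/close lifecycle. A minimal
sketch, not part of the patch ("be_domid" is a placeholder for the backend
domain id, and error handling is omitted):

/* Sketch of the grant-sharing lifecycle behind vtpm_initialise()/vtpm_free() */
#include <sys/mman.h>              /* PROT_READ / PROT_WRITE */
#include "hw/xen/xen_common.h"     /* xen_xc_gntshr_open() wrapper */

static void gntshr_lifecycle_sketch(int be_domid)
{
    uint32_t ring_ref;
    xc_gntshr *xcs = xen_xc_gntshr_open(NULL, 0);
    void *page = xc_gntshr_share_pages(xcs, be_domid, 1, &ring_ref,
                                       PROT_READ | PROT_WRITE);

    /* ... publish ring_ref and the event channel in XenStore, then
     *     exchange requests and responses through the shared page ... */

    xc_gntshr_munmap(xcs, page, 1);  /* unmap and revoke the shared page */
    xc_gntshr_close(xcs);            /* release the grant-sharing handle */
}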