[Xen-devel] [RFC PATCH 10/10] Add VMware guest info access
From: Don Slutz <dslutz@xxxxxxxxxxx>

Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
 tools/libxc/xc_domain.c         | 112 ++++++++++++++++++++++++++++++
 tools/libxc/xenctrl.h           |  24 +++++++
 xen/arch/x86/hvm/hvm.c          | 148 ++++++++++++++++++++++++++++++++++++++++
 xen/include/public/hvm/hvm_op.h |  18 +++++
 4 files changed, 302 insertions(+)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 1ccafc5..0437c6f 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1246,6 +1246,118 @@ int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long
     return rc;
 }
 
+int xc_set_vmport_guest_info(xc_interface *handle,
+                             domid_t dom,
+                             unsigned int key_len,
+                             char *key,
+                             unsigned int val_len,
+                             char *val)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_vmport_guest_info_t, arg);
+    int rc;
+
+    if ((key_len < 1) ||
+        (key_len > VMPORT_GUEST_INFO_KEY_MAX) ||
+        (val_len > VMPORT_GUEST_INFO_VAL_MAX)) {
+        return -1;
+    }
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    hypercall.op = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_set_vmport_guest_info;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+    arg->domid = dom;
+    arg->key_length = key_len;
+    arg->value_length = val_len;
+    memcpy(arg->data, key, key_len);
+    memcpy(&arg->data[key_len], val, val_len);
+    rc = do_xen_hypercall(handle, &hypercall);
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_get_vmport_guest_info(xc_interface *handle,
+                             domid_t dom,
+                             unsigned int key_len,
+                             char *key,
+                             unsigned int val_max,
+                             unsigned int *val_len,
+                             char *val)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_vmport_guest_info_t, arg);
+    int rc;
+
+    if ((key_len < 1) ||
+        (key_len > VMPORT_GUEST_INFO_KEY_MAX) )
+        return -1;
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+
+    hypercall.op = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_get_vmport_guest_info;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+    arg->domid = dom;
+    arg->key_length = key_len;
+    arg->value_length = 0;
+    *val_len = 0;
+    memcpy(arg->data, key, key_len);
+    rc = do_xen_hypercall(handle, &hypercall);
+    if (rc == 0) {
+        if (arg->value_length > val_max)
+            arg->value_length = val_max;
+        *val_len = arg->value_length;
+        memcpy(val, &arg->data[key_len], arg->value_length);
+    }
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_fetch_vmport_guest_info(xc_interface *handle,
+                               domid_t dom,
+                               unsigned int idx,
+                               unsigned int key_max,
+                               unsigned int *key_len,
+                               char *key,
+                               unsigned int val_max,
+                               unsigned int *val_len,
+                               char *val)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_vmport_guest_info_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+
+    hypercall.op = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_get_vmport_guest_info;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+    arg->domid = dom;
+    arg->key_length = 0;
+    arg->value_length = idx;
+    *key_len = 0;
+    *val_len = 0;
+    rc = do_xen_hypercall(handle, &hypercall);
+    if (rc == 0) {
+        if (arg->key_length > key_max)
+            arg->key_length = key_max;
+        *key_len = arg->key_length;
+        memcpy(key, arg->data, arg->key_length);
+        if (arg->value_length > val_max)
+            arg->value_length = val_max;
+        *val_len = arg->value_length;
+        memcpy(val,
+               &arg->data[arg->key_length],
+               arg->value_length);
+    }
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
 int xc_domain_setdebugging(xc_interface *xch,
                            uint32_t domid,
                            unsigned int enable)
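
For context, here is a rough usage sketch (not part of the patch) of the two single-key wrappers above: a toolstack publishes a key with xc_set_vmport_guest_info() and reads it back with xc_get_vmport_guest_info(). The domain id and the key/value strings below are made-up values for illustration only:

#include <stdio.h>
#include <string.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    domid_t domid = 7;                 /* assumed test domain */
    char key[] = "guestinfo.build";    /* "guestinfo." prefix is accepted */
    char val[] = "1234";
    char out[128];                     /* VMPORT_GUEST_INFO_VAL_MAX */
    unsigned int out_len = 0;

    if ( xch == NULL )
        return 1;

    /* Publish the key/value pair so the guest can read it via the VMware port. */
    if ( xc_set_vmport_guest_info(xch, domid, strlen(key), key,
                                  strlen(val), val) )
        fprintf(stderr, "xc_set_vmport_guest_info failed\n");

    /* Read it back; out is not NUL-terminated, out_len holds the length. */
    if ( xc_get_vmport_guest_info(xch, domid, strlen(key), key,
                                  sizeof(out), &out_len, out) == 0 )
        printf("%s = %.*s\n", key, (int)out_len, out);

    xc_interface_close(xch);
    return 0;
}
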
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 6e58ebe..6b22b3b 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1774,6 +1774,30 @@ void xc_clear_last_error(xc_interface *xch);
 
 int xc_set_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long value);
 int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long *value);
+int xc_set_vmport_guest_info(xc_interface *handle,
+                             domid_t dom,
+                             unsigned int key_len,
+                             char *key,
+                             unsigned int val_len,
+                             char *val);
+int xc_get_vmport_guest_info(xc_interface *handle,
+                             domid_t dom,
+                             unsigned int key_len,
+                             char *key,
+                             unsigned int val_max,
+                             unsigned int *val_len,
+                             char *val);
+int xc_fetch_vmport_guest_info(xc_interface *handle,
+                               domid_t dom,
+                               unsigned int idx,
+                               unsigned int key_max,
+                               unsigned int *key_len,
+                               char *key,
+                               unsigned int val_max,
+                               unsigned int *val_len,
+                               char *val);
+
+
 /* HVM guest pass-through */
 int xc_assign_device(xc_interface *xch,
                      uint32_t domid,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index a557272..c6f84fc 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4662,6 +4662,154 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     }
 
+    case HVMOP_get_vmport_guest_info:
+    case HVMOP_set_vmport_guest_info:
+    {
+        struct xen_hvm_vmport_guest_info a;
+        struct domain *d;
+        char *key = NULL;
+        char *value = NULL;
+        struct vmport_state *vs;
+        int idx;
+        vmport_guestinfo_t *add_slots[5];
+        int num_slots = 0, num_free_slots = 0;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        ASSERT(strlen("guestinfo.") == 10);
+#if VMPORT_MAX_KEY_LEN + 10 != VMPORT_GUEST_INFO_KEY_MAX
+#error Need to adjust VMPORT_MAX_KEY_LEN & VMPORT_GUEST_INFO_KEY_MAX
+#endif
+#if VMPORT_MAX_VAL_LEN != VMPORT_GUEST_INFO_VAL_MAX
+#error Need to adjust VMPORT_MAX_VAL_LEN & VMPORT_GUEST_INFO_VAL_MAX
+#endif
+        if ( a.key_length > strlen("guestinfo.") ) {
+            if ( (unsigned long)a.key_length + (unsigned long)a.value_length > sizeof(a.data) )
+                return -EINVAL;
+            if ( memcmp(a.data, "guestinfo.", strlen("guestinfo.")) == 0 ) {
+                key = &a.data[strlen("guestinfo.")];
+                a.key_length -= strlen("guestinfo.");
+            } else {
+                key = &a.data[0];
+            }
+            value = key + a.key_length;
+        } else if (a.key_length > 0) {
+            if ( (unsigned long)a.key_length + (unsigned long)a.value_length > sizeof(a.data) )
+                return -EINVAL;
+            key = &a.data[0];
+            if ( a.key_length > VMPORT_MAX_KEY_LEN )
+                return -EINVAL;
+            if ( a.value_length > VMPORT_MAX_VAL_LEN )
+                return -EINVAL;
+            value = key + a.key_length;
+        } else if ( (a.key_length == 0) && (op == HVMOP_set_vmport_guest_info) ) {
+            return -EINVAL;
+        }
+        d = rcu_lock_domain_by_any_id(a.domid);
+        if ( d == NULL )
+            return rc;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) )
+            goto param_fail9;
+
+        rc = xsm_hvm_param(XSM_TARGET, d, op);
+        if ( rc )
+            goto param_fail9;
+
+        vs = d->arch.hvm_domain.vmport_data;
+        if ((a.key_length == 0) && (a.value_length >= vs->used_guestinfo)) {
+            rc = -E2BIG;
+            goto param_fail9;
+        }
+        for (idx = 0; idx < vs->used_guestinfo; idx++) {
+            if (vs->guestinfo[idx] &&
+                (vs->guestinfo[idx]->key_len == 0))
+                num_free_slots++;
+        }
+        if (num_free_slots < 5) {
+            num_slots = 5 - num_free_slots;
+            if (vs->used_guestinfo + num_slots > VMPORT_MAX_NUM_KEY)
+                num_slots = VMPORT_MAX_NUM_KEY - vs->used_guestinfo;
+            for (idx = 0; idx < num_slots; idx++)
+                add_slots[idx] = xzalloc(vmport_guestinfo_t);
+        }
+
+        spin_lock(&d->arch.hvm_domain.vmport_lock);
+
+        for (idx = 0; idx < num_slots; idx++)
+            vs->guestinfo[vs->used_guestinfo + idx] = add_slots[idx];
+        vs->used_guestinfo += num_slots;
+
+        if ( op == HVMOP_set_vmport_guest_info )
+        {
+            int free_idx = -1;
+
+            for (idx = 0; idx < vs->used_guestinfo; idx++) {
+                if (!vs->guestinfo[idx]) {
+                    gdprintk(XENLOG_WARNING, "idx=%d not allocated, but used_guestinfo=%d\n",
+                             idx, vs->used_guestinfo);
+                } else if ((vs->guestinfo[idx]->key_len == a.key_length) &&
+                           (memcmp(key,
+                                   vs->guestinfo[idx]->key_data,
+                                   vs->guestinfo[idx]->key_len) == 0)) {
+                    vs->guestinfo[idx]->val_len = a.value_length;
+                    memcpy(vs->guestinfo[idx]->val_data, value, a.value_length);
+                    break;
+                } else if ((vs->guestinfo[idx]->key_len == 0) &&
+                           (free_idx == -1)) {
+                    free_idx = idx;
+                }
+            }
+            if (idx >= vs->used_guestinfo) {
+                if (free_idx == -1) {
+                    rc = -EBUSY;
+                } else {
+                    vs->guestinfo[free_idx]->key_len = a.key_length;
+                    memcpy(vs->guestinfo[free_idx]->key_data, key, a.key_length);
+                    vs->guestinfo[free_idx]->val_len = a.value_length;
+                    memcpy(vs->guestinfo[free_idx]->val_data, value, a.value_length);
+                }
+            }
+        }
+        else
+        {
+            if (a.key_length == 0) {
+                idx = a.value_length;
+                a.key_length = vs->guestinfo[idx]->key_len;
+                memcpy(a.data, vs->guestinfo[idx]->key_data, a.key_length);
+                a.value_length = vs->guestinfo[idx]->val_len;
+                memcpy(&a.data[a.key_length],
+                       vs->guestinfo[idx]->val_data,
+                       a.value_length);
+                rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+            } else {
+                for (idx = 0; idx < vs->used_guestinfo; idx++) {
+                    if ((vs->guestinfo[idx]->key_len == a.key_length) &&
+                        (memcmp(key,
+                                vs->guestinfo[idx]->key_data,
+                                vs->guestinfo[idx]->key_len) == 0)) {
+                        a.value_length = vs->guestinfo[idx]->val_len;
+                        memcpy(value,
+                               vs->guestinfo[idx]->val_data,
+                               a.value_length);
+                        rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+                        break;
+                    }
+                }
+                if (idx >= vs->used_guestinfo) {
+                    rc = -ENOENT;
+                }
+            }
+        }
+        spin_unlock(&d->arch.hvm_domain.vmport_lock);
+
+ param_fail9:
+        rcu_unlock_domain(d);
+        break;
+    }
+
     default:
     {
         gdprintk(XENLOG_DEBUG, "Bad HVM op %ld.\n", op);
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index a9aab4b..a530903 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -272,4 +272,22 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
 
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
+/* Get/set vmport subcommands */
+#define HVMOP_get_vmport_guest_info 17
+#define HVMOP_set_vmport_guest_info 18
+#define VMPORT_GUEST_INFO_KEY_MAX 40
+#define VMPORT_GUEST_INFO_VAL_MAX 128
+struct xen_hvm_vmport_guest_info {
+    /* Domain to be accessed */
+    domid_t domid;
+    /* key length */
+    uint16_t key_length;
+    /* value length */
+    uint16_t value_length;
+    /* key and value data */
+    char data[VMPORT_GUEST_INFO_KEY_MAX + VMPORT_GUEST_INFO_VAL_MAX];
+};
+typedef struct xen_hvm_vmport_guest_info xen_hvm_vmport_guest_info_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_vmport_guest_info_t);
+
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
-- 
1.8.4
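
Similarly, a rough sketch (again not part of the patch) of how a management tool could dump every stored guestinfo entry with xc_fetch_vmport_guest_info(), relying on the index-based get path above returning an error once the index runs past the last used slot:

#include <stdio.h>
#include <xenctrl.h>

static void dump_guest_info(xc_interface *xch, domid_t domid)
{
    unsigned int idx;

    for ( idx = 0; ; idx++ )
    {
        char key[40], val[128];        /* VMPORT_GUEST_INFO_{KEY,VAL}_MAX */
        unsigned int key_len = 0, val_len = 0;

        if ( xc_fetch_vmport_guest_info(xch, domid, idx,
                                        sizeof(key), &key_len, key,
                                        sizeof(val), &val_len, val) )
            break;                     /* hypervisor reports no such index */

        if ( key_len )                 /* skip freed (empty-key) slots */
            printf("%.*s = %.*s\n",
                   (int)key_len, key, (int)val_len, val);
    }
}

Note that the hypercall handler strips a leading "guestinfo." from keys on the set side, so the sketch prints the stored names verbatim rather than re-adding the prefix.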