Re: [Xen-devel] [PATCH 5/6] libxl: support mapping static shared memory areas during domain creation
On Wed, 23 Aug 2017, Zhongze Liu wrote:
> Add libxl__sshm_add to map shared pages from one DomU to another. The mapping
> process involves the following steps:
>
>   * Set defaults and check for further errors in the static_shm configs:
>     overlapping areas, invalid ranges, duplicated master domain,
>     no master domain etc.
>   * Write information of static shared memory areas into the appropriate
>     xenstore paths.
>   * Use xc_domain_add_to_physmap_batch to do the page sharing.
>
> Temporarily mark this as unsupported on x86 because calling p2m_add_foreign on
> two DomU's is currently not allowed on x86 (see the comments in
> x86/mm/p2m.c:p2m_add_foreign for more details).
>
> This is for the proposal "Allow setting up shared memory areas between VMs
> from xl config file" (see [1]).
>
> [1] https://lists.xenproject.org/archives/html/xen-devel/2017-07/msg03047.html
>
> Signed-off-by: Zhongze Liu <blackskygg@xxxxxxxxx>
>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
> Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> Cc: Tim Deegan <tim@xxxxxxx>
> Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
> Cc: Julien Grall <julien.grall@xxxxxxx>
> Cc: xen-devel@xxxxxxxxxxxxx
> ---
>  tools/libxl/Makefile         |   2 +-
>  tools/libxl/libxl_arch.h     |   6 +
>  tools/libxl/libxl_arm.c      |  15 ++
>  tools/libxl/libxl_create.c   |  27 ++++
>  tools/libxl/libxl_internal.h |  14 ++
>  tools/libxl/libxl_sshm.c     | 336 +++++++++++++++++++++++++++++++++++++++++++
>  tools/libxl/libxl_x86.c      |  18 +++
>  tools/libxl/libxl_xshelp.c   |   8 ++
>  8 files changed, 425 insertions(+), 1 deletion(-)
>  create mode 100644 tools/libxl/libxl_sshm.c
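For reference, the xenstore layout that this patch builds (reconstructed from the code below; the values shown are illustrative, not literal) is roughly:

    /local/static_shm/<id>/
        master       = <master domid>
        begin        = 0x<master begin>
        end          = 0x<master end>
        prot         = <prot string>
        cache_policy = <cache policy string>
        status       = alive
        slaves/
            <slave domid>/
                begin  = 0x<slave begin>
                end    = 0x<slave end>
                offset = 0x<offset into the master range>
                prot   = <prot string>

    /local/domain/<domid>/static_shm/<id>/role = master | slave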
> diff --git a/tools/libxl/Makefile b/tools/libxl/Makefile
> index 3b63fb2cad..fd624b28f3 100644
> --- a/tools/libxl/Makefile
> +++ b/tools/libxl/Makefile
> @@ -138,7 +138,7 @@ LIBXL_OBJS = flexarray.o libxl.o libxl_create.o libxl_dm.o libxl_pci.o \
>  			libxl_dom_suspend.o libxl_dom_save.o libxl_usb.o \
>  			libxl_vtpm.o libxl_nic.o libxl_disk.o libxl_console.o \
>  			libxl_cpupool.o libxl_mem.o libxl_sched.o libxl_tmem.o \
> -			libxl_9pfs.o libxl_domain.o \
> +			libxl_9pfs.o libxl_domain.o libxl_sshm.o \
>  			$(LIBXL_OBJS-y)
>  LIBXL_OBJS += libxl_genid.o
>  LIBXL_OBJS += _libxl_types.o libxl_flask.o _libxl_types_internal.o
> diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
> index 5e1fc6060e..1d681d8863 100644
> --- a/tools/libxl/libxl_arch.h
> +++ b/tools/libxl/libxl_arch.h
> @@ -71,6 +71,12 @@ int libxl__arch_extra_memory(libxl__gc *gc,
>                               const libxl_domain_build_info *info,
>                               uint64_t *out);
>  
> +_hidden
> +bool libxl__arch_domain_support_sshm(const libxl_domain_build_info *b_info);
> +
> +_hidden
> +int libxl__arch_domain_sshm_cachepolicy_setdefault(libxl_static_shm *sshm);
> +
>  #if defined(__i386__) || defined(__x86_64__)
>  
>  #define LAPIC_BASE_ADDRESS  0xfee00000
> diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
> index d842d888eb..0975109c0c 100644
> --- a/tools/libxl/libxl_arm.c
> +++ b/tools/libxl/libxl_arm.c
> @@ -1065,6 +1065,21 @@ void libxl__arch_domain_build_info_acpi_setdefault(
>      libxl_defbool_setdefault(&b_info->acpi, false);
>  }
>  
> +bool libxl__arch_domain_support_sshm(const libxl_domain_build_info *b_info)
> +{
> +    return true;
> +}
> +
> +int libxl__arch_domain_sshm_cachepolicy_setdefault(libxl_static_shm *sshm)
> +{
> +    if (sshm->cache_policy == LIBXL_SSHM_CACHEPOLICY_UNKNOWN)
> +        sshm->cache_policy = LIBXL_SSHM_CACHEPOLICY_ARM_NORMAL;
> +    if (sshm->cache_policy >= LIBXL_SSHM_CACHEPOLICY_X86_NORMAL)
> +        return ERROR_INVAL;
> +
> +    return 0;
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
> index 1158303e1a..8e5ec486d2 100644
> --- a/tools/libxl/libxl_create.c
> +++ b/tools/libxl/libxl_create.c
> @@ -501,6 +501,14 @@ int libxl__domain_build(libxl__gc *gc,
>          ret = ERROR_INVAL;
>          goto out;
>      }
> +
> +    /* the p2m has been setup, we could map the static shared memory now. */
> +    ret = libxl__sshm_add(gc, domid, d_config->sshms, d_config->num_sshms);
> +    if (ret != 0) {
> +        LOG(ERROR, "failed to map static shared memory");
> +        goto out;
> +    }
> +
>      ret = libxl__build_post(gc, domid, info, state, vments, localents);
>  out:
>      return ret;
> @@ -918,6 +926,25 @@ static void initiate_domain_create(libxl__egc *egc,
>          goto error_out;
>      }
>  
> +    if (d_config->num_sshms != 0 &&
> +        !libxl__arch_domain_support_sshm(&d_config->b_info)) {
> +        LOGD(ERROR, domid, "static_shm is not supported by this domain type.");
> +        ret = ERROR_INVAL;
> +        goto error_out;
> +    }
> +
> +    ret = libxl__sshm_check_overlap(gc, domid,
> +                                    d_config->sshms, d_config->num_sshms);
> +    if (ret) goto error_out;

I think it makes sense to call libxl__sshm_check_overlap only if
num_sshms != 0.
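A minimal sketch of that suggestion, reusing the variables already present in initiate_domain_create (illustrative only, not part of the patch):

    if (d_config->num_sshms != 0) {
        ret = libxl__sshm_check_overlap(gc, domid,
                                        d_config->sshms, d_config->num_sshms);
        if (ret) goto error_out;
    }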
> +
> +    for (i = 0; i < d_config->num_sshms; ++i) {
> +        ret = libxl__sshm_setdefault(gc, domid, &d_config->sshms[i]);
> +        if (ret) {
> +            LOGD(ERROR, domid, "Unable to set defaults for static sshm");
> +            goto error_out;
> +        }
> +    }
> +
>      ret = libxl__domain_make(gc, d_config, &domid, &state->config);
>      if (ret) {
>          LOGD(ERROR, domid, "cannot make domain: %d", ret);
> diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
> index 724750967c..74bc0acb21 100644
> --- a/tools/libxl/libxl_internal.h
> +++ b/tools/libxl/libxl_internal.h
> @@ -721,6 +721,7 @@ _hidden char **libxl__xs_directory(libxl__gc *gc, xs_transaction_t t,
>                                     const char *path, unsigned int *nb);
>  /* On error: returns NULL, sets errno (no logging) */
>  _hidden char *libxl__xs_libxl_path(libxl__gc *gc, uint32_t domid);
> +_hidden char *libxl__xs_get_sshmpath(libxl__gc *gc, const char *id);
>  
>  _hidden int libxl__backendpath_parse_domid(libxl__gc *gc, const char *be_path,
>                                             libxl_domid *domid_out);
> @@ -4352,6 +4353,19 @@ static inline bool libxl__acpi_defbool_val(const libxl_domain_build_info *b_info
>  }
>  #endif
>  
> +/*
> + * Set up static shared ram pages for HVM domains to communicate
> + *
> + * This function should only be called after the memory map is constructed
> + * and before any further memory access. */
> +_hidden int libxl__sshm_add(libxl__gc *gc, uint32_t domid,
> +                            libxl_static_shm *sshm, int len);
> +
> +_hidden int libxl__sshm_check_overlap(libxl__gc *gc, uint32_t domid,
> +                                      libxl_static_shm *sshms, int len);
> +_hidden int libxl__sshm_setdefault(libxl__gc *gc, uint32_t domid,
> +                                   libxl_static_shm *sshm);
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/tools/libxl/libxl_sshm.c b/tools/libxl/libxl_sshm.c
> new file mode 100644
> index 0000000000..e16c24ccb9
> --- /dev/null
> +++ b/tools/libxl/libxl_sshm.c
> @@ -0,0 +1,336 @@
> +#include "libxl_osdeps.h"
> +#include "libxl_internal.h"
> +#include "libxl_arch.h"
> +
> +#define SSHM_ERROR(domid, sshmid, f, ...)                              \
> +    LOGD(ERROR, domid, "static_shm id = %s: " f, sshmid, ##__VA_ARGS__)
> +
> +
> +/* Set default values for libxl_static_shm */
> +int libxl__sshm_setdefault(libxl__gc *gc, uint32_t domid,
> +                           libxl_static_shm *sshm)
> +{
> +    int rc;
> +
> +    if (sshm->role == LIBXL_SSHM_ROLE_UNKNOWN) {
> +        sshm->role = LIBXL_SSHM_ROLE_SLAVE;
> +    }
> +    if (sshm->prot == LIBXL_SSHM_PROT_UNKNOWN) {
> +        sshm->prot = LIBXL_SSHM_PROT_RW;
> +    }
> +    /* role-specific checks */
> +    if (LIBXL_SSHM_ROLE_SLAVE == sshm->role) {
> +        if (sshm->offset == LIBXL_SSHM_RANGE_UNKNOWN) {
> +            sshm->offset = 0;
> +        }
> +        if (sshm->cache_policy != LIBXL_SSHM_CACHEPOLICY_UNKNOWN) {
> +            SSHM_ERROR(domid, sshm->id,
> +                       "cache_policy is only applicable to master domains");
> +            return ERROR_INVAL;
> +        }
> +    } else {
> +        if (sshm->offset != LIBXL_SSHM_RANGE_UNKNOWN) {
> +            SSHM_ERROR(domid, sshm->id,
> +                       "offset is only applicable to slave domains");
> +            return ERROR_INVAL;
> +        }
> +        rc = libxl__arch_domain_sshm_cachepolicy_setdefault(sshm);
> +        if (rc) {
> +            SSHM_ERROR(domid, sshm->id,
> +                       "cache policy not supported on this platform");
> +            return ERROR_INVAL;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +/* Compare function for sorting sshm ranges by sshm->begin */
> +static int sshm_range_cmp(const void *a, const void *b)
> +{
> +    libxl_static_shm *const *sshma = a, *const *sshmb = b;
> +    return (*sshma)->begin > (*sshmb)->begin ? 1 : -1;
> +}
> +
> +/* check if the sshm slave configs in @sshm overlap */
> +int libxl__sshm_check_overlap(libxl__gc *gc, uint32_t domid,
> +                              libxl_static_shm *sshms, int len)
> +{
> +
> +    const libxl_static_shm **slave_sshms = NULL;
> +    int num_slaves;
> +    int i;
> +
> +    slave_sshms = libxl__calloc(gc, len, sizeof(slave_sshms[0]));
> +    num_slaves = 0;
> +    for (i = 0; i < len; ++i) {
> +        if (LIBXL_SSHM_ROLE_SLAVE == sshms[i].role)
> +            slave_sshms[num_slaves++] = sshms + i;
> +    }
> +    qsort(slave_sshms, num_slaves, sizeof(slave_sshms[0]), sshm_range_cmp);
> +
> +    for (i = 0; i < num_slaves - 1; ++i) {
> +        if (slave_sshms[i+1]->begin < slave_sshms[i]->end) {
> +            SSHM_ERROR(domid, slave_sshms[i+1]->id, "slave ranges overlap.");
> +            return ERROR_INVAL;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +/* The caller have to guarentee that sshm->begin < sshm->end */
> +static int libxl__sshm_do_map(libxl__gc *gc, uint32_t mid, uint32_t sid,
> +                              libxl_static_shm *sshm,
> +                              uint64_t mbegin, uint64_t mend)
> +{
> +    int rc;
> +    int i;
> +    unsigned int num_mpages, num_spages, offset;
> +    int *errs;
> +    xen_ulong_t *idxs;
> +    xen_pfn_t *gpfns;
> +
> +    num_mpages = (mend - mbegin) >> 12;

Please don't hardcode 12, please use XC_PAGE_SHIFT (also in other libxl
patches).
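Assuming XC_PAGE_SHIFT from xenctrl.h is the constant meant here, the change would look roughly like this (illustrative only):

    num_mpages = (mend - mbegin) >> XC_PAGE_SHIFT;
    num_spages = (sshm->end - sshm->begin) >> XC_PAGE_SHIFT;
    offset = sshm->offset >> XC_PAGE_SHIFT;

The same constant would presumably also replace the remaining open-coded shifts in this function (the idxs[]/gpfns[] calculations and the offset in the error message).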
> +    num_spages = (sshm->end - sshm->begin) >> 12;
> +    offset = sshm->offset >> 12;
> +
> +    /* Check range. Test offset < mpages first to avoid overflow */
> +    if ((offset >= num_mpages) || (num_mpages - offset < num_spages)) {
> +        SSHM_ERROR(sid, sshm->id, "exceeds master's address space.");
> +        rc = ERROR_INVAL;
> +        goto out;
> +    }
> +
> +    /* fill out the pfn's and do the mapping */
> +    errs = libxl__calloc(gc, num_spages, sizeof(int));
> +    idxs = libxl__calloc(gc, num_spages, sizeof(xen_ulong_t));
> +    gpfns = libxl__calloc(gc, num_spages, sizeof(xen_pfn_t));
> +    for (i = 0; i < num_spages; i++) {
> +        idxs[i] = (mbegin >> 12) + offset + i;
> +        gpfns[i]= (sshm->begin >> 12) + i;
> +    }
> +    rc = xc_domain_add_to_physmap_batch(CTX->xch,
> +                                        sid, mid,
> +                                        XENMAPSPACE_gmfn_foreign,
> +                                        num_spages,
> +                                        idxs, gpfns, errs);
> +
> +    for (i = 0; i< num_spages; i++) {
> +        if (errs[i]) {
> +            SSHM_ERROR(sid, sshm->id,
> +                       "can't map at address 0x%"PRIx64".",
> +                       sshm->begin + (offset << 12) );
> +            rc = ERROR_FAIL;
> +        }
> +    }
> +    if (rc) goto out;
> +
> +    rc = 0;
> +
> +out:
> +    return rc;
> +}
> +
> +static int libxl__sshm_add_slave(libxl__gc *gc, uint32_t domid,
> +                                 libxl_static_shm *sshm)
> +{
> +    int rc;
> +    char *sshm_path, *slave_path, *dom_path, *dom_sshm_path, *dom_role_path;
> +    char *ents[9];
> +    const char *xs_value;
> +    libxl_static_shm master_sshm;
> +    uint32_t master_domid;
> +    xs_transaction_t xt = XBT_NULL;
> +
> +    sshm_path = libxl__xs_get_sshmpath(gc, sshm->id);
> +    slave_path = GCSPRINTF("%s/slaves/%"PRIu32, sshm_path, domid);
> +    dom_path = libxl__xs_get_dompath(gc, domid);
> +    /* the domain should be in xenstore by now */
> +    assert(dom_path);
> +    dom_sshm_path = GCSPRINTF("%s/static_shm/%s", dom_path, sshm->id);
> +    dom_role_path = GCSPRINTF("%s/role", dom_sshm_path);
> +
> +    /* prepare the slave xenstore entries */
> +    ents[0] = "begin";
> +    ents[1] = GCSPRINTF("0x%"PRIx64, sshm->begin);
> +    ents[2] = "end";
> +    ents[3] = GCSPRINTF("0x%"PRIx64, sshm->end);
> +    ents[4] = "offset";
> +    ents[5] = GCSPRINTF("0x%"PRIx64, sshm->offset);
> +    ents[6] = "prot";
> +    ents[7] = libxl__strdup(gc, libxl_sshm_prot_to_string(sshm->prot));
> +    ents[8] = NULL;
> +
> +    for (;;) {
> +        rc = libxl__xs_transaction_start(gc, &xt);
> +        if (rc) goto out;
> +
> +        if (!libxl__xs_read(gc, xt, sshm_path)) {
> +            SSHM_ERROR(domid, sshm->id, "no master found.");
> +            rc = ERROR_FAIL;
> +            goto out;
> +        }
> +
> +        /* every ID can appear in each domain at most once */
> +        if (libxl__xs_read(gc, xt, dom_sshm_path)) {
> +            SSHM_ERROR(domid, sshm->id,
> +                       "domain tried to map the same ID twice.");
> +            rc = ERROR_FAIL;
> +            goto out;
> +        }
> +
> +        /* look at the master info and see if we could do the mapping */
> +        rc = libxl__xs_read_checked(gc, xt,
> +                                    GCSPRINTF("%s/prot", sshm_path),
> +                                    &xs_value);
> +        if (rc) goto out;
> +        libxl_sshm_prot_from_string(xs_value, &master_sshm.prot);
> +
> +        rc = libxl__xs_read_checked(gc, xt,
> +                                    GCSPRINTF("%s/begin", sshm_path),
> +                                    &xs_value);
> +        if (rc) goto out;
> +        master_sshm.begin = strtoull(xs_value, NULL, 16);
> +
> +        rc = libxl__xs_read_checked(gc, xt,
> +                                    GCSPRINTF("%s/end", sshm_path),
> +                                    &xs_value);
> +        if (rc) goto out;
> +        master_sshm.end = strtoull(xs_value, NULL, 16);
> +
> +        rc = libxl__xs_read_checked(gc, xt,
> +                                    GCSPRINTF("%s/master", sshm_path),
> +                                    &xs_value);
> +        if (rc) goto out;
> +        master_domid = strtoull(xs_value, NULL, 16);
> +
> +        /* check if the slave is asking too much permission */

this comment doesn't seem to match the check below

> +        if (LIBXL_SSHM_PROT_UNKNOWN == sshm->prot) {
> +            sshm->prot = master_sshm.prot;
> +        }
> +        if (master_sshm.prot < sshm->prot) {
> +            SSHM_ERROR(domid, sshm->id, "slave is asking too much permission.");
> +            rc = ERROR_INVAL;
> +            goto out;
> +        }
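If the intent is to keep both the prot defaulting and the permission check, one way the comment could be split so it matches the code, purely as an illustration of the point above:

        /* a slave that leaves prot unspecified inherits the master's prot */
        if (LIBXL_SSHM_PROT_UNKNOWN == sshm->prot) {
            sshm->prot = master_sshm.prot;
        }
        /* reject a slave asking for more permission than the master grants */
        if (master_sshm.prot < sshm->prot) {
            SSHM_ERROR(domid, sshm->id, "slave is asking too much permission.");
            rc = ERROR_INVAL;
            goto out;
        }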
> +
> +        /* all checks passed, do the job */
> +        rc = libxl__sshm_do_map(gc, master_domid, domid, sshm,
> +                                master_sshm.begin, master_sshm.end);

Doesn't libxl__sshm_do_map risk being called twice on xenstore transaction
retries?
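One conceivable way to avoid that, sketched only for illustration (the `mapped` flag is hypothetical and not in the patch; it would be declared as `bool mapped = false;` before the retry loop):

        /* map at most once, even if the transaction is retried */
        if (!mapped) {
            rc = libxl__sshm_do_map(gc, master_domid, domid, sshm,
                                    master_sshm.begin, master_sshm.end);
            if (rc) {
                rc = ERROR_INVAL;
                goto out;
            }
            mapped = true;
        }

Alternatively, since libxl__sshm_do_map does not touch xenstore, the mapping could be deferred until after the transaction has committed successfully.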
> +        if (rc) {
> +            rc = ERROR_INVAL;
> +            goto out;
> +        }
> +
> +        /* write the result to xenstore and commit */
> +        rc = libxl__xs_write_checked(gc, xt, dom_role_path, "slave");
> +        if (rc) goto out;
> +        libxl__xs_writev(gc, xt, slave_path, ents);
> +
> +        rc = libxl__xs_transaction_commit(gc, &xt);
> +        if (!rc) break;
> +        if (rc < 0) goto out;
> +    }
> +
> +    rc = 0;
> +out:
> +    libxl__xs_transaction_abort(gc, &xt);
> +    return rc;
> +}
> +
> +static int libxl__sshm_add_master(libxl__gc *gc, uint32_t domid,
> +                                  libxl_static_shm *sshm)
> +{
> +    int rc;
> +    char *sshm_path, *dom_path, *dom_role_path;
> +    char *ents[13];
> +    struct xs_permissions noperm;
> +    xs_transaction_t xt = XBT_NULL;
> +
> +    sshm_path = libxl__xs_get_sshmpath(gc, sshm->id);
> +    dom_path = libxl__xs_get_dompath(gc, domid);
> +    /* the domain should be in xenstore by now */
> +    assert(dom_path);
> +    dom_role_path = GCSPRINTF("%s/static_shm/%s/role", dom_path, sshm->id);
> +
> +    /* prepare the xenstore entries */
> +    ents[0] = "master";
> +    ents[1] = GCSPRINTF("%"PRIu32, domid);
> +    ents[2] = "begin";
> +    ents[3] = GCSPRINTF("0x%"PRIx64, sshm->begin);
> +    ents[4] = "end";
> +    ents[5] = GCSPRINTF("0x%"PRIx64, sshm->end);
> +    ents[6] = "prot";
> +    ents[7] = libxl__strdup(gc, libxl_sshm_prot_to_string(sshm->prot));
> +    ents[8] = "cache_policy";
> +    ents[9] = libxl__strdup(gc,
> +                            libxl_sshm_cachepolicy_to_string(sshm->cache_policy));
> +    ents[10] = "status";
> +    ents[11] = "alive";
> +    ents[12] = NULL;
> +
> +    /* could only be accessed by Dom0 */
> +    noperm.id = 0;
> +    noperm.perms = XS_PERM_NONE;
> +
> +    for (;;) {
> +        rc = libxl__xs_transaction_start(gc, &xt);
> +        if (rc) goto out;
> +
> +        if (!libxl__xs_read(gc, xt, sshm_path)) {
> +            /* every ID can appear in each domain at most once */
> +            if (libxl__xs_read(gc, xt, dom_role_path)) {
> +                SSHM_ERROR(domid, sshm->id,
> +                           "domain tried to map the same ID twice.");
> +                rc = ERROR_FAIL;
> +                goto out;
> +            }
> +            rc = libxl__xs_write_checked(gc, xt, dom_role_path, "master");
> +            if (rc) goto out;;
> +
> +            libxl__xs_mknod(gc, xt, sshm_path, &noperm, 1);
> +            libxl__xs_writev(gc, xt, sshm_path, ents);
> +        } else {
> +            SSHM_ERROR(domid, sshm->id, "can only have one master.");
> +            rc = ERROR_FAIL;
> +            goto out;
> +        }
> +
> +        rc = libxl__xs_transaction_commit(gc, &xt);
> +        if (!rc) break;
> +        if (rc < 0) goto out;
> +    }
> +
> +    rc = 0;
> +out:
> +    libxl__xs_transaction_abort(gc, &xt);
> +    return rc;
> +}
> +
> +int libxl__sshm_add(libxl__gc *gc, uint32_t domid,
> +                    libxl_static_shm *sshms, int len)
> +{
> +    int rc, i;
> +
> +    if (!len) return 0;
> +
> +    for (i = 0; i < len; ++i) {
> +        if (LIBXL_SSHM_ROLE_SLAVE == sshms[i].role) {
> +            rc = libxl__sshm_add_slave(gc, domid, sshms+i);
> +        } else {
> +            rc = libxl__sshm_add_master(gc, domid, sshms+i);
> +        }
> +        if (rc) return rc;
> +    }
> +
> +    return 0;
> +}
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
> index 455f6f0bed..8dd34fd7ba 100644
> --- a/tools/libxl/libxl_x86.c
> +++ b/tools/libxl/libxl_x86.c
> @@ -587,6 +587,24 @@ void libxl__arch_domain_build_info_acpi_setdefault(
>      libxl_defbool_setdefault(&b_info->acpi, true);
>  }
>  
> +bool libxl__arch_domain_support_sshm(const libxl_domain_build_info *b_info)
> +{
> +    /* FIXME: Mark this as unsupported since calling p2m_add_foreign on two
> +     * DomU's is currently not allowd on x86, see the comments in
> +     * x86/mm/p2m.c: p2m_add_foreign */
> +    return false;
> +}
> +
> +int libxl__arch_domain_sshm_cachepolicy_setdefault(libxl_static_shm *sshm)
> +{
> +    if (sshm->cache_policy == LIBXL_SSHM_CACHEPOLICY_UNKNOWN)
> +        sshm->cache_policy = LIBXL_SSHM_CACHEPOLICY_X86_NORMAL;
> +    if (sshm->cache_policy < LIBXL_SSHM_CACHEPOLICY_X86_NORMAL)
> +        return ERROR_INVAL;
> +
> +    return 0;
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/tools/libxl/libxl_xshelp.c b/tools/libxl/libxl_xshelp.c
> index c4a18df353..d91fbf5fda 100644
> --- a/tools/libxl/libxl_xshelp.c
> +++ b/tools/libxl/libxl_xshelp.c
> @@ -193,6 +193,14 @@ char *libxl__xs_libxl_path(libxl__gc *gc, uint32_t domid)
>      return s;
>  }
>  
> +char *libxl__xs_get_sshmpath(libxl__gc *gc, const char *id)
> +{
> +    char *s = GCSPRINTF("/local/static_shm/%s", id);
> +    if (!s)
> +        LOGE(ERROR, "cannot allocate static shm path");
> +    return s;
> +}
> +
>  int libxl__xs_read_mandatory(libxl__gc *gc, xs_transaction_t t,
>                               const char *path, const char **result_out)
>  {

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel