[Xen-devel] [RFC XEN PATCH v3 29/39] tools: reserve guest memory for ACPI from device model
Some virtual devices (e.g. NVDIMM) require complex ACPI tables and
definition blocks (in AML), which a device model (e.g. QEMU) is already
able to construct. Instead of introducing a redundant implementation in
Xen, we would like to reuse the device model to construct that ACPI
content. This commit allows Xen to reserve an area in guest memory for
the device model to pass its ACPI tables and definition blocks to the
guest, which hvmloader will then load. The base guest physical address
and the size of the reserved area are passed to the device model via
the XenStore keys hvmloader/dm-acpi/{address, length}. An xl config
option "dm_acpi_pages = N" is added to specify the number of reserved
guest memory pages.

Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 tools/libxc/include/xc_dom.h            |  1 +
 tools/libxc/xc_dom_x86.c                | 13 +++++++++++++
 tools/libxl/libxl_dom.c                 | 25 +++++++++++++++++++++++++
 tools/libxl/libxl_types.idl             |  1 +
 tools/xl/xl_parse.c                     | 17 ++++++++++++++++-
 xen/include/public/hvm/hvm_xs_strings.h |  8 ++++++++
 6 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/tools/libxc/include/xc_dom.h b/tools/libxc/include/xc_dom.h
index ce47058c41..7c541576e7 100644
--- a/tools/libxc/include/xc_dom.h
+++ b/tools/libxc/include/xc_dom.h
@@ -93,6 +93,7 @@ struct xc_dom_image {
     struct xc_dom_seg pgtables_seg;
     struct xc_dom_seg devicetree_seg;
     struct xc_dom_seg start_info_seg; /* HVMlite only */
+    struct xc_dom_seg dm_acpi_seg; /* reserved PFNs for DM ACPI */
     xen_pfn_t start_info_pfn;
     xen_pfn_t console_pfn;
     xen_pfn_t xenstore_pfn;
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index cb68efcbd3..8755350295 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -674,6 +674,19 @@ static int alloc_magic_pages_hvm(struct xc_dom_image *dom)
                          ioreq_server_pfn(0));
         xc_hvm_param_set(xch, domid, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
                          NR_IOREQ_SERVER_PAGES);
+
+        if ( dom->dm_acpi_seg.pages )
+        {
+            size_t acpi_size = dom->dm_acpi_seg.pages * XC_DOM_PAGE_SIZE(dom);
+
+            rc = xc_dom_alloc_segment(dom, &dom->dm_acpi_seg, "DM ACPI",
+                                      0, acpi_size);
+            if ( rc != 0 )
+            {
+                DOMPRINTF("Unable to reserve memory for DM ACPI");
+                goto out;
+            }
+        }
     }
 
     rc = xc_dom_alloc_segment(dom, &dom->start_info_seg,
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index f54fd49a73..bad1719892 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -897,6 +897,29 @@ static int hvm_build_set_xs_values(libxl__gc *gc,
         goto err;
     }
 
+    if (dom->dm_acpi_seg.pages) {
+        uint64_t guest_addr_out = dom->dm_acpi_seg.pfn * XC_DOM_PAGE_SIZE(dom);
+
+        if (guest_addr_out >= 0x100000000ULL) {
+            LOG(ERROR,
+                "Guest address of DM ACPI is 0x%"PRIx64", but expected below 4G",
+                guest_addr_out);
+            goto err;
+        }
+
+        path = GCSPRINTF("/local/domain/%d/"HVM_XS_DM_ACPI_ADDRESS, domid);
+        ret = libxl__xs_printf(gc, XBT_NULL, path, "0x%"PRIx64, guest_addr_out);
+        if (ret)
+            goto err;
+
+        path = GCSPRINTF("/local/domain/%d/"HVM_XS_DM_ACPI_LENGTH, domid);
+        ret = libxl__xs_printf(gc, XBT_NULL, path, "0x%"PRIx64,
+                               (uint64_t)(dom->dm_acpi_seg.pages *
+                                          XC_DOM_PAGE_SIZE(dom)));
+        if (ret)
+            goto err;
+    }
+
     return 0;
 
 err:
@@ -1184,6 +1207,8 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
             dom->vnode_to_pnode[i] = info->vnuma_nodes[i].pnode;
     }
 
+    dom->dm_acpi_seg.pages = info->u.hvm.dm_acpi_pages;
+
     rc = libxl__build_dom(gc, domid, info, state, dom);
     if (rc != 0)
         goto out;
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index 173d70acec..4acc0457f4 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -565,6 +565,7 @@ libxl_domain_build_info = Struct("domain_build_info",[
                                        ("rdm", libxl_rdm_reserve),
                                        ("rdm_mem_boundary_memkb", MemKB),
                                        ("mca_caps", uint64),
+                                       ("dm_acpi_pages", integer),
                                        ])),
                 ("pv", Struct(None, [("kernel", string),
                                      ("slack_memkb", MemKB),
diff --git a/tools/xl/xl_parse.c b/tools/xl/xl_parse.c
index 02ddd2e90d..ed562a1956 100644
--- a/tools/xl/xl_parse.c
+++ b/tools/xl/xl_parse.c
@@ -810,7 +810,7 @@ void parse_config_data(const char *config_source,
                        libxl_domain_config *d_config)
 {
     const char *buf;
-    long l, vcpus = 0;
+    long l, vcpus = 0, nr_dm_acpi_pages;
     XLU_Config *config;
     XLU_ConfigList *cpus, *vbds, *nics, *pcis, *cvfbs, *cpuids, *vtpms,
                    *usbctrls, *usbdevs, *p9devs;
@@ -1929,6 +1929,21 @@ skip_usbdev:
 
 #undef parse_extra_args
 
+    if (b_info->type == LIBXL_DOMAIN_TYPE_HVM &&
+        b_info->device_model_version != LIBXL_DEVICE_MODEL_VERSION_NONE) {
+        /* parse 'dm_acpi_pages' */
+        e = xlu_cfg_get_long(config, "dm_acpi_pages", &nr_dm_acpi_pages, 0);
+        if (e && e != ESRCH) {
+            fprintf(stderr, "ERROR: unable to parse dm_acpi_pages.\n");
+            exit(-ERROR_FAIL);
+        }
+        if (!e && nr_dm_acpi_pages <= 0) {
+            fprintf(stderr, "ERROR: require positive dm_acpi_pages.\n");
+            exit(-ERROR_FAIL);
+        }
+        b_info->u.hvm.dm_acpi_pages = nr_dm_acpi_pages;
+    }
+
     /* If we've already got vfb=[] for PV guest then ignore top level
      * VNC config. */
     if (c_info->type == LIBXL_DOMAIN_TYPE_PV && !d_config->num_vfbs) {
diff --git a/xen/include/public/hvm/hvm_xs_strings.h b/xen/include/public/hvm/hvm_xs_strings.h
index fea1dd4407..9f04ff2adc 100644
--- a/xen/include/public/hvm/hvm_xs_strings.h
+++ b/xen/include/public/hvm/hvm_xs_strings.h
@@ -80,4 +80,12 @@
  */
 #define HVM_XS_OEM_STRINGS "bios-strings/oem-%d"
 
+/* If a range of guest memory is reserved to pass ACPI from the device
+ * model (e.g. QEMU), the start address and the size of the reserved
+ * guest memory are specified by the following two xenstore values.
+ */
+#define HVM_XS_DM_ACPI_ROOT "hvmloader/dm-acpi"
+#define HVM_XS_DM_ACPI_ADDRESS HVM_XS_DM_ACPI_ROOT"/address"
+#define HVM_XS_DM_ACPI_LENGTH HVM_XS_DM_ACPI_ROOT"/length"
+
 #endif /* __XEN_PUBLIC_HVM_HVM_XS_STRINGS_H__ */
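As a usage illustration (not part of this patch): a domain config containing
dm_acpi_pages = 2 reserves two guest pages below 4G, and libxl advertises
their location through the two xenstore keys above. Below is a minimal sketch
of how a device model might read them back with libxenstore; the helper name
and error handling are illustrative only, assuming the keys live under
/local/domain/<domid>/ as written by hvm_build_set_xs_values().

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xenstore.h>

/* Illustrative sketch only: look up the reserved DM ACPI region for a
 * domain.  The paths correspond to HVM_XS_DM_ACPI_ADDRESS/LENGTH from
 * hvm_xs_strings.h; this helper is not part of the patch. */
static int read_dm_acpi_region(int domid, uint64_t *addr, uint64_t *len)
{
    struct xs_handle *xsh = xs_open(0);
    char path[64];
    char *val;
    unsigned int vlen;
    int rc = -1;

    if (!xsh)
        return -1;

    snprintf(path, sizeof(path),
             "/local/domain/%d/hvmloader/dm-acpi/address", domid);
    val = xs_read(xsh, XBT_NULL, path, &vlen);
    if (!val)
        goto out;
    *addr = strtoull(val, NULL, 0);   /* libxl writes "0x%"PRIx64 */
    free(val);

    snprintf(path, sizeof(path),
             "/local/domain/%d/hvmloader/dm-acpi/length", domid);
    val = xs_read(xsh, XBT_NULL, path, &vlen);
    if (!val)
        goto out;
    *len = strtoull(val, NULL, 0);
    free(val);

    rc = 0;
 out:
    xs_close(xsh);
    return rc;
}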
--
2.14.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel