[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [RFC PATCH 8/23] Tools/libacpi: Add a user configurable parameter to control vIOMMU attributes
From: Chao Gao <chao.gao@xxxxxxxxx> a field, viommu_info, is added to struct libxl_domain_build_info. Several attributes can be specified by guest configuration file for the DMAR table building and dummy vIOMMU creation. In the domain creation process, new logic is added to build the ACPI DMAR table in the tool stack according to the VM configuration and to pass it through to hvmloader via the xenstore ACPI PT channel. If there are ACPI tables needed to pass through, we join the tables. Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx> Signed-off-by: Lan Tianyu <tianyu.lan@xxxxxxxxx> --- tools/libacpi/build.c | 5 +++ tools/libacpi/libacpi.h | 1 + tools/libxl/libxl_dom.c | 85 +++++++++++++++++++++++++++++++++++++++++++++ tools/libxl/libxl_types.idl | 8 +++++ tools/xl/xl_parse.c | 54 ++++++++++++++++++++++++++++ 5 files changed, 153 insertions(+) diff --git a/tools/libacpi/build.c b/tools/libacpi/build.c index 89a3c6c..080413e 100644 --- a/tools/libacpi/build.c +++ b/tools/libacpi/build.c @@ -550,6 +550,11 @@ static int new_vm_gid(struct acpi_ctxt *ctxt, return 1; } +uint32_t acpi_get_table_size(struct acpi_header * header) +{ + return header ? 
header->length : 0; +} + int acpi_build_tables(struct acpi_ctxt *ctxt, struct acpi_config *config) { struct acpi_info *acpi_info; diff --git a/tools/libacpi/libacpi.h b/tools/libacpi/libacpi.h index ee08c45..0882729 100644 --- a/tools/libacpi/libacpi.h +++ b/tools/libacpi/libacpi.h @@ -108,6 +108,7 @@ struct acpi_config { #define DMAR_X2APIC_OPT_OUT 0x2 struct acpi_dmar *construct_dmar(struct acpi_ctxt *ctxt, const struct acpi_config *config); +uint32_t acpi_get_table_size(struct acpi_header * header); int acpi_build_tables(struct acpi_ctxt *ctxt, struct acpi_config *config); #endif /* __LIBACPI_H__ */ diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c index d519c8d..99132d0 100644 --- a/tools/libxl/libxl_dom.c +++ b/tools/libxl/libxl_dom.c @@ -19,6 +19,7 @@ #include "libxl_internal.h" #include "libxl_arch.h" +#include "libacpi/libacpi.h" #include <xc_dom.h> #include <xen/hvm/hvm_info_table.h> @@ -908,6 +909,43 @@ out: return rc; } +static unsigned long acpi_v2p(struct acpi_ctxt *ctxt, void *v) +{ + return (unsigned long)v; +} + +static void *acpi_mem_alloc(struct acpi_ctxt *ctxt, + uint32_t size, uint32_t align) +{ + return aligned_alloc(align, size); +} + +static void acpi_mem_free(struct acpi_ctxt *ctxt, + void *v, uint32_t size) +{ + /* ACPI builder currently doesn't free memory so this is just a stub */ +} + +static int libxl__acpi_build_dmar(libxl__gc *gc, + struct acpi_config *config, + void **data_r, int *datalen_r) +{ + struct acpi_ctxt ctxt; + void *table; + + ctxt.mem_ops.alloc = acpi_mem_alloc; + ctxt.mem_ops.free = acpi_mem_free; + ctxt.mem_ops.v2p = acpi_v2p; + + table = construct_dmar(&ctxt, config); + if ( !table ) + return ERROR_FAIL; + + *data_r = table; + *datalen_r = acpi_get_table_size((struct acpi_header *)table); + return 0; +} + static int libxl__domain_firmware(libxl__gc *gc, libxl_domain_build_info *info, struct xc_dom_image *dom) @@ -1028,6 +1066,53 @@ static int libxl__domain_firmware(libxl__gc *gc, } } + /* build DMAR table 
according guest configuration and joint it with other + * apci tables specified by acpi_modules */ + if (!libxl_defbool_is_default(info->u.hvm.viommu.intremap) && + info->device_model_version == LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN) { + struct acpi_config config; + + memset(&config, 0, sizeof(config)); + if (libxl_defbool_val(info->u.hvm.viommu.intremap)) { + config.table_flags |= ACPI_HAS_DMAR; + config.dmar_flag = DMAR_INTR_REMAP; + if (!libxl_defbool_is_default(info->u.hvm.viommu.x2apic_opt_out) + && libxl_defbool_val(info->u.hvm.viommu.x2apic_opt_out)) + config.dmar_flag |= DMAR_X2APIC_OPT_OUT; + + config.viommu_base_addr = info->u.hvm.viommu.base_addr; + data = NULL; + e = libxl__acpi_build_dmar(gc, &config, &data, &datalen); + if (e) { + LOGE(ERROR, "failed to build DMAR table"); + rc = ERROR_FAIL; + goto out; + } + + libxl__ptr_add(gc, data); + if (datalen) { + if (!dom->acpi_modules[0].data) { + dom->acpi_modules[0].data = data; + dom->acpi_modules[0].length = (uint32_t)datalen; + } else { + /* joint tables */ + void *newdata; + newdata = malloc(datalen + dom->acpi_modules[0].length); + if (!newdata) { + LOGE(ERROR, "failed to joint DMAR table to acpi modules"); + rc = ERROR_FAIL; + goto out; + } + memcpy(newdata, dom->acpi_modules[0].data, + dom->acpi_modules[0].length); + memcpy(newdata + dom->acpi_modules[0].length, data, datalen); + dom->acpi_modules[0].data = newdata; + dom->acpi_modules[0].length += (uint32_t)datalen; + } + } + } + } + return 0; out: assert(rc != 0); diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl index a612d1f..912582a 100644 --- a/tools/libxl/libxl_types.idl +++ b/tools/libxl/libxl_types.idl @@ -440,6 +440,13 @@ libxl_rdm_reserve = Struct("rdm_reserve", [ ("policy", libxl_rdm_reserve_policy), ]) +libxl_viommu_info = Struct("viommu_info", [ + ("intremap", libxl_defbool), + ("x2apic_opt_out", libxl_defbool), + ("cap", uint64), + ("base_addr", uint64), + ]) + libxl_domain_build_info = Struct("domain_build_info",[ 
("max_vcpus", integer), ("avail_vcpus", libxl_bitmap), @@ -550,6 +557,7 @@ libxl_domain_build_info = Struct("domain_build_info",[ ("serial_list", libxl_string_list), ("rdm", libxl_rdm_reserve), ("rdm_mem_boundary_memkb", MemKB), + ("viommu", libxl_viommu_info), ])), ("pv", Struct(None, [("kernel", string), ("slack_memkb", MemKB), diff --git a/tools/xl/xl_parse.c b/tools/xl/xl_parse.c index 1ef0c27..9349367 100644 --- a/tools/xl/xl_parse.c +++ b/tools/xl/xl_parse.c @@ -18,6 +18,7 @@ #include <stdio.h> #include <stdlib.h> #include <xen/hvm/e820.h> +#include <xen/viommu.h> #include <libxl.h> #include <libxl_utils.h> @@ -29,6 +30,8 @@ extern void set_default_nic_values(libxl_device_nic *nic); +#define VIOMMU_BASE_ADDR 0xfed90000 + #define ARRAY_EXTEND_INIT__CORE(array,count,initfn,more) \ ({ \ typeof((count)) array_extend_old_count = (count); \ @@ -707,6 +710,25 @@ int parse_usbdev_config(libxl_device_usbdev *usbdev, char *token) return 0; } +/* Parses viommu data and adds info into viommu + * Returns 1 if the input token does not match one of the keys + * or parsed values are not correct. 
Successful parse returns 0 */ +static int parse_viommu_config(libxl_viommu_info *viommu, char *token) +{ + char *oparg; + + if (MATCH_OPTION("intremap", token, oparg)) { + libxl_defbool_set(&viommu->intremap, !!strtoul(oparg, NULL, 0)); + } else if (MATCH_OPTION("x2apic_opt_out", token, oparg)) { + libxl_defbool_set(&viommu->x2apic_opt_out, !!strtoul(oparg, NULL, 0)); + } else { + fprintf(stderr, "Unknown string `%s' in viommu spec\n", token); + return 1; + } + + return 0; +} + void parse_config_data(const char *config_source, const char *config_data, int config_len, @@ -1084,6 +1106,38 @@ void parse_config_data(const char *config_source, if (!xlu_cfg_get_long (config, "rdm_mem_boundary", &l, 0)) b_info->u.hvm.rdm_mem_boundary_memkb = l * 1024; + + if (!xlu_cfg_get_string(config, "viommu_info", &buf, 0)) { + libxl_viommu_info viommu; + char *p, *str2; + + str2 = strdup(buf); + if (!str2) { + fprintf(stderr, "ERROR: strdup failed\n"); + exit (1); + } + p = strtok(str2, ","); + if (!p) { + fprintf(stderr, "ERROR: invalid viommu_info format\n"); + exit (1); + } + do { + if (*p == ' ') + p++; + if (parse_viommu_config(&viommu, p)) { + fprintf(stderr, "ERROR: invalid viommu settting\n"); + exit (1); + } + } while ((p=strtok(NULL, ",")) != NULL); + free(str2); + b_info->u.hvm.viommu.intremap = viommu.intremap; + b_info->u.hvm.viommu.x2apic_opt_out = viommu.x2apic_opt_out; + if ( libxl_defbool_val(b_info->u.hvm.viommu.intremap) ) + { + b_info->u.hvm.viommu.cap = VIOMMU_CAP_IRQ_REMAPPING; + b_info->u.hvm.viommu.base_addr = VIOMMU_BASE_ADDR; + } + } break; case LIBXL_DOMAIN_TYPE_PV: { -- 1.8.3.1 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |