Re: [Xen-devel] [RFC][v2][PATCH 10/14] tools: extend XENMEM_set_memory_map
On Fri, May 22, 2015 at 05:35:10PM +0800, Tiejun Chen wrote:
> Here we'll construct a basic guest e820 table via
> XENMEM_set_memory_map. This table includes lowmem, highmem
> and RDMs if they exist. And hvmloader would need this info
> later.
>
> Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
> ---
>  tools/libxl/libxl_dom.c | 87 +++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 87 insertions(+)
>
> diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
> index 84d5465..cc4b1a6 100644
> --- a/tools/libxl/libxl_dom.c
> +++ b/tools/libxl/libxl_dom.c
> @@ -913,6 +913,87 @@ out:
>      return rc;
>  }
>
> +/*
> + * Here we're just trying to set these kinds of e820 mappings:
> + *
> + * #1. Low memory region
> + *
> + * Low RAM starts at least from 1M to make sure all standard regions
> + * of the PC memory map, like BIOS, VGA memory-mapped I/O and vgabios,
> + * have enough space.
> + * Note: Those stuffs below 1M are still constructed with multiple
> + * e820 entries by hvmloader. At this point we don't change anything.
> + *
> + * #2. RDM region if it exists
> + *
> + * #3. High memory region if it exists
> + *
> + * Note: these regions are not overlapping since we already check
> + * to adjust them. Please refer to libxl__domain_device_construct_rdm().
> + */
> +#define GUEST_LOW_MEM_START_DEFAULT 0x100000
> +static int libxl__domain_construct_memmap(libxl__gc *gc,
> +                                          libxl_domain_config *d_config,
> +                                          uint32_t domid,
> +                                          struct xc_hvm_build_args *args)
This is x86-specific. I think libxl__domain_construct_e820 is a better name.
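Something along these lines, i.e. just the rename with the signature otherwise unchanged:

    static int libxl__domain_construct_e820(libxl__gc *gc,
                                            libxl_domain_config *d_config,
                                            uint32_t domid,
                                            struct xc_hvm_build_args *args)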
> +{
> +    libxl_ctx *ctx = libxl__gc_owner(gc);
Use CTX.
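I.e. drop the local ctx variable and use the convenience macro from
libxl_internal.h (it expands to libxl__gc_owner(gc)) directly at the call
site, something like this untested sketch:

    if (xc_domain_set_memory_map(CTX->xch, domid, e820, e820_entries) != 0)
        return -1;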
> +    unsigned int nr = 0, i;
> +    /* We always own at least one lowmem entry. */
> +    unsigned int e820_entries = 1;
> +    uint64_t highmem_end = 0, highmem_size = args->mem_size - args->lowmem_size;
> +    struct e820entry *e820 = NULL;
> +
> +    /* Add all rdm entries. */
> +    e820_entries += d_config->num_rdms;
> +
> +    /* If we should have a highmem range. */
> +    if (highmem_size)
> +    {
> +        highmem_end = (1ull<<32) + highmem_size;
> +        e820_entries++;
> +    }
> +
> +    if (e820_entries >= E820MAX) {
> +        LOG(ERROR, "Ooops! Too many entries in the memory map!\n");
> +        return -1;
> +    }
> +
> +    e820 = libxl__malloc(gc, sizeof(struct e820entry) * e820_entries);
> +
> +    /* Low memory */
> +    e820[nr].addr = GUEST_LOW_MEM_START_DEFAULT;
> +    e820[nr].size = args->lowmem_size - GUEST_LOW_MEM_START_DEFAULT;
> +    e820[nr].type = E820_RAM;
> +    nr++;
> +
> +    /* RDM mapping */
> +    for (i = 0; i < d_config->num_rdms; i++) {
> +        /*
> +         * We should drop this kind of rdm entry.
> +         */
This comment is not useful.
> +        if (d_config->rdms[i].flag == LIBXL_RDM_RESERVE_FLAG_INVALID)
> +            continue;
> +
> +        e820[nr].addr = d_config->rdms[i].start;
> +        e820[nr].size = d_config->rdms[i].size;
> +        e820[nr].type = E820_RESERVED;
> +        nr++;
> +    }
> +
> +    /* High memory */
> +    if (highmem_size) {
> +        e820[nr].addr = ((uint64_t)1 << 32);
> +        e820[nr].size = highmem_size;
> +        e820[nr].type = E820_RAM;
> +    }
> +
> +    if (xc_domain_set_memory_map(ctx->xch, domid, e820, e820_entries) != 0)
> +        return -1;
> +
> +    return 0;
> +}
> +
>  int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
>                       libxl_domain_config *d_config,
>                       libxl__domain_build_state *state)
> @@ -1016,6 +1097,12 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
>          ret = set_vnuma_info(gc, domid, info, state);
>          if (ret) goto out;
>      }
> +
> +    if (libxl__domain_construct_memmap(gc, d_config, domid, &args)) {
> +        LOG(ERROR, "setting domain rdm memory map failed");
The error message should not be RDM specific.
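Something generic would do, e.g. (untested, exact wording up to you):

    LOG(ERROR, "setting domain memory map failed");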
Wei.
> +        goto out;
> +    }
> +
>      ret = hvm_build_set_params(ctx->xch, domid, info, state->store_port,
>                                 &state->store_mfn, state->console_port,
>                                 &state->console_mfn, state->store_domid,
> --
> 1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel