[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v4 01/14] libxc: Rework extra module initialisation



On Mon, Mar 14, 2016 at 05:55:36PM +0000, Anthony PERARD wrote:
> This patch use xc_dom_alloc_segment() to allocate the memory space for the
> ACPI modules and the SMBIOS modules. This is to replace the arbitrary
> placement of 1MB after the hvmloader image.

s/of/at/ ?

Or are you rather saying that the memory for ACPI modules and such
is offset by 1MB after the hvmloader image?

In other words:

/-----------\ <- 0
|           |
| hvmloader |
| binary    |
+-----------| <- 800kB
|           |
|  empty    |
+-----------+ <- 1.8MB
| ACPI DATA |
|           |
+-----------+ <- 2.8MB
|           |
| .....     |
+-----------| <- 4MB
|  scratch  |
| space for |
| hvmloader |
\-----------/ <- 8MB

Is this what can happen now? But prior to this you had:

/-----------\ <- 0
|           |
| hvmloader |
| binary    |
+-----------| <- 800kB
|           |
|  empty    |
+-----------+ <- 1MB
| ACPI DATA |
|           |
+-----------+ <- 2MB 
|           |
| .....     |
+-----------| <- 4MB
|  scratch  |
| space for |
| hvmloader |
\-----------/ <- 8MB

?

> 
> In later patches, while trying to load a firmware such as OVMF, the later

.. what later patches? These ones? You may want to mention which ones.

> could easily be loaded past the address 4MB (OVMF is a 2MB binary), but
 
Why would it be loaded past 4MB? Why not in the space right after ACPI
DSDT and such?


> hvmloader use a range of memory from 4MB to 8MB to perform tests and in the
> process, clear the memory, before loading the modules.

s/clear/clears/

> 
> Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
> ---
> Changes in V4:
> - check that guest_addr_out have a reasonable value.
> 
> New patch in V3.
> ---
>  tools/libxc/xc_dom_hvmloader.c | 131 
> ++++++++++++-----------------------------
>  1 file changed, 38 insertions(+), 93 deletions(-)
> 
> diff --git a/tools/libxc/xc_dom_hvmloader.c b/tools/libxc/xc_dom_hvmloader.c
> index 330d5e8..7cf5854 100644
> --- a/tools/libxc/xc_dom_hvmloader.c
> +++ b/tools/libxc/xc_dom_hvmloader.c
> @@ -129,98 +129,52 @@ static elf_errorstatus xc_dom_parse_hvm_kernel(struct 
> xc_dom_image *dom)
>      return rc;
>  }
>  
> -static int modules_init(struct xc_dom_image *dom,
> -                        uint64_t vend, struct elf_binary *elf,
> -                        uint64_t *mstart_out, uint64_t *mend_out)
> +static int module_init_one(struct xc_dom_image *dom,
> +                           struct xc_hvm_firmware_module *module,
> +                           char *name)
>  {
> -#define MODULE_ALIGN 1UL << 7
> -#define MB_ALIGN     1UL << 20
> -#define MKALIGN(x, a) (((uint64_t)(x) + (a) - 1) & ~(uint64_t)((a) - 1))
> -    uint64_t total_len = 0, offset1 = 0;
> +    struct xc_dom_seg seg;
> +    void *dest;
>  
> -    if ( dom->acpi_module.length == 0 && dom->smbios_module.length == 0 )
> -        return 0;
> -
> -    /* Find the total length for the firmware modules with a reasonable large
> -     * alignment size to align each the modules.
> -     */
> -    total_len = MKALIGN(dom->acpi_module.length, MODULE_ALIGN);
> -    offset1 = total_len;
> -    total_len += MKALIGN(dom->smbios_module.length, MODULE_ALIGN);
> -
> -    /* Want to place the modules 1Mb+change behind the loader image. */
> -    *mstart_out = MKALIGN(elf->pend, MB_ALIGN) + (MB_ALIGN);
> -    *mend_out = *mstart_out + total_len;
> -
> -    if ( *mend_out > vend )
> -        return -1;
> -
> -    if ( dom->acpi_module.length != 0 )
> -        dom->acpi_module.guest_addr_out = *mstart_out;
> -    if ( dom->smbios_module.length != 0 )
> -        dom->smbios_module.guest_addr_out = *mstart_out + offset1;
> +    if ( module->length )
> +    {
> +        if ( xc_dom_alloc_segment(dom, &seg, name, 0, module->length) )
> +            goto err;
> +        dest = xc_dom_seg_to_ptr(dom, &seg);
> +        if ( dest == NULL )
> +        {
> +            DOMPRINTF("%s: xc_dom_seg_to_ptr(dom, &seg) => NULL",
> +                      __FUNCTION__);
> +            goto err;
> +        }
> +        memcpy(dest, module->data, module->length);
> +        module->guest_addr_out = seg.vstart;
> +        if ( module->guest_addr_out > UINT32_MAX ||
> +             module->guest_addr_out + module->length > UINT32_MAX )
> +        {
> +            DOMPRINTF("%s: Module %s would be loaded abrove 4GB",
> +                      __FUNCTION__, name);
> +            goto err;
> +        }
> +    }
>  
>      return 0;
> +err:
> +    return -1;
>  }
>  
> -static int loadmodules(struct xc_dom_image *dom,
> -                       uint64_t mstart, uint64_t mend,
> -                       uint32_t domid)
> +static int modules_init(struct xc_dom_image *dom)
>  {
> -    privcmd_mmap_entry_t *entries = NULL;
> -    unsigned long pfn_start;
> -    unsigned long pfn_end;
> -    size_t pages;
> -    uint32_t i;
> -    uint8_t *dest;
> -    int rc = -1;
> -    xc_interface *xch = dom->xch;
> -
> -    if ( mstart == 0 || mend == 0 )
> -        return 0;
> -
> -    pfn_start = (unsigned long)(mstart >> PAGE_SHIFT);
> -    pfn_end = (unsigned long)((mend + PAGE_SIZE - 1) >> PAGE_SHIFT);
> -    pages = pfn_end - pfn_start;
> -
> -    /* Map address space for module list. */
> -    entries = calloc(pages, sizeof(privcmd_mmap_entry_t));
> -    if ( entries == NULL )
> -        goto error_out;
> -
> -    for ( i = 0; i < pages; i++ )
> -        entries[i].mfn = (mstart >> PAGE_SHIFT) + i;
> -
> -    dest = xc_map_foreign_ranges(
> -        xch, domid, pages << PAGE_SHIFT, PROT_READ | PROT_WRITE, 1 << 
> PAGE_SHIFT,
> -        entries, pages);
> -    if ( dest == NULL )
> -        goto error_out;
> -
> -    /* Zero the range so padding is clear between modules */
> -    memset(dest, 0, pages << PAGE_SHIFT);
> -
> -    /* Load modules into range */
> -    if ( dom->acpi_module.length != 0 )
> -    {
> -        memcpy(dest,
> -               dom->acpi_module.data,
> -               dom->acpi_module.length);
> -    }
> -    if ( dom->smbios_module.length != 0 )
> -    {
> -        memcpy(dest + (dom->smbios_module.guest_addr_out - mstart),
> -               dom->smbios_module.data,
> -               dom->smbios_module.length);
> -    }
> -
> -    munmap(dest, pages << PAGE_SHIFT);
> -    rc = 0;
> +    int rc;
>  
> - error_out:
> -    free(entries);
> +    rc = module_init_one(dom, &dom->acpi_module, "acpi module");
> +    if ( rc ) goto err;
> +    rc = module_init_one(dom, &dom->smbios_module, "smbios module");
> +    if ( rc ) goto err;

You can't beat that code though. That is some nice deletion of code.

Now, is this safe for migration?
I presume so since this is all E820_RESERVED, right? No funny business
there?

>  
> -    return rc;
> +    return 0;
> +err:
> +    return -1;
>  }
>  
>  static elf_errorstatus xc_dom_load_hvm_kernel(struct xc_dom_image *dom)
> @@ -229,7 +183,6 @@ static elf_errorstatus xc_dom_load_hvm_kernel(struct 
> xc_dom_image *dom)
>      privcmd_mmap_entry_t *entries = NULL;
>      size_t pages = (elf->pend - elf->pstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
>      elf_errorstatus rc;
> -    uint64_t m_start = 0, m_end = 0;
>      int i;
>  
>      /* Map address space for initial elf image. */
> @@ -262,15 +215,7 @@ static elf_errorstatus xc_dom_load_hvm_kernel(struct 
> xc_dom_image *dom)
>  
>      munmap(elf->dest_base, elf->dest_size);
>  
> -    rc = modules_init(dom, dom->total_pages << PAGE_SHIFT, elf, &m_start,
> -                      &m_end);
> -    if ( rc != 0 )
> -    {
> -        DOMPRINTF("%s: insufficient space to load modules.", __func__);
> -        goto error;
> -    }
> -
> -    rc = loadmodules(dom, m_start, m_end, dom->guest_domid);
> +    rc = modules_init(dom);
>      if ( rc != 0 )
>      {
>          DOMPRINTF("%s: unable to load modules.", __func__);
> -- 
> Anthony PERARD
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.