
Re: [Xen-devel] [PATCH 2/2] libxc: add MMIO hole size parameter to xc_hvm_build()



On Tue, Feb 28, 2012 at 6:27 PM, David Vrabel <david.vrabel@xxxxxxxxxx> wrote:
> From: David Vrabel <david.vrabel@xxxxxxxxxx>
>
> Add a parameter for the MMIO hole size when building a HVM domain.
>
> This is useful for specifying a larger than normal MMIO hole to ensure
> that no PCI device's MMIO region overlaps with the guest physical
> addresses of RAM.  This is needed on certain systems whose PCI bridges
> have broken ACS (Access Control Services) implementations.  On these
> systems, if a device
> behind the bridge attempts a DMA to a guest physical address that
> overlaps with the MMIO region of another device behind the bridge,
> then the bridge intercepts the access and forwards it directly to the
> device and the read or write never hits RAM.
>
> Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>

Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
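
For anyone wiring this up in a toolstack, here is a minimal caller sketch
showing the new field in use.  It is an illustration only: the domain id,
sizes and hvmloader path are made up, and it assumes the struct
xc_hvm_params interface introduced by patch 1/2 of this series; whether
the struct is passed by value or by pointer is defined there, by value is
assumed below to match the params.mem_target usage in this patch.

    #include <xenctrl.h>
    #include <xenguest.h>

    /* Sketch: build an HVM guest with a 1GB MMIO hole below 4GB instead
     * of the default (mmio_size == 0 falls back to
     * HVM_BELOW_4G_MMIO_LENGTH). */
    static int build_with_large_hole(xc_interface *xch, uint32_t domid)
    {
        struct xc_hvm_params params = {
            .mem_size        = 4096ull << 20,   /* 4GB of guest RAM */
            .mem_target      = 4096ull << 20,
            .mmio_size       = 1024ull << 20,   /* 1GB MMIO hole */
            .image_file_name = "/usr/lib/xen/boot/hvmloader",
        };

        return xc_hvm_build(xch, domid, params);
    }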

> ---
>  tools/libxc/xc_hvm_build.c |   29 ++++++++++++++++++-----------
>  tools/libxc/xenguest.h     |    1 +
>  2 files changed, 19 insertions(+), 11 deletions(-)
>
> diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c
> index 31d4734..fbeddac 100644
> --- a/tools/libxc/xc_hvm_build.c
> +++ b/tools/libxc/xc_hvm_build.c
> @@ -46,7 +46,8 @@
>  #define NR_SPECIAL_PAGES     5
>  #define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
>
> -static void build_hvm_info(void *hvm_info_page, uint64_t mem_size)
> +static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
> +                           uint64_t mmio_start, uint64_t mmio_size)
>  {
>     struct hvm_info_table *hvm_info = (struct hvm_info_table *)
>         (((unsigned char *)hvm_info_page) + HVM_INFO_OFFSET);
> @@ -54,10 +55,10 @@ static void build_hvm_info(void *hvm_info_page, uint64_t mem_size)
>     uint8_t sum;
>     int i;
>
> -    if ( lowmem_end > HVM_BELOW_4G_RAM_END )
> +    if ( lowmem_end > mmio_start )
>     {
> -        highmem_end = lowmem_end + (1ull<<32) - HVM_BELOW_4G_RAM_END;
> -        lowmem_end = HVM_BELOW_4G_RAM_END;
> +        highmem_end = (1ull<<32) + (lowmem_end - mmio_start);
> +        lowmem_end = mmio_start;
>     }
>
>     memset(hvm_info_page, 0, PAGE_SIZE);
> @@ -126,10 +127,10 @@ static int loadelfimage(
>  * Check whether there exists mmio hole in the specified memory range.
>  * Returns 1 if exists, else returns 0.
>  */
> -static int check_mmio_hole(uint64_t start, uint64_t memsize)
> +static int check_mmio_hole(uint64_t start, uint64_t memsize,
> +                           uint64_t mmio_start, uint64_t mmio_size)
>  {
> -    if ( start + memsize <= HVM_BELOW_4G_MMIO_START ||
> -         start >= HVM_BELOW_4G_MMIO_START + HVM_BELOW_4G_MMIO_LENGTH )
> +    if ( start + memsize <= mmio_start || start >= mmio_start + mmio_size )
>         return 0;
>     else
>         return 1;
> @@ -142,6 +143,8 @@ static int setup_guest(xc_interface *xch,
>     xen_pfn_t *page_array = NULL;
>     unsigned long i, nr_pages = params->mem_size >> PAGE_SHIFT;
>     unsigned long target_pages = params->mem_target >> PAGE_SHIFT;
> +    uint64_t mmio_start = (1ull << 32) - params->mmio_size;
> +    uint64_t mmio_size = params->mmio_size;
>     unsigned long entry_eip, cur_pages, cur_pfn;
>     void *hvm_info_page;
>     uint32_t *ident_pt;
> @@ -188,8 +191,8 @@ static int setup_guest(xc_interface *xch,
>
>     for ( i = 0; i < nr_pages; i++ )
>         page_array[i] = i;
> -    for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
> -        page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
> +    for ( i = mmio_start >> PAGE_SHIFT; i < nr_pages; i++ )
> +        page_array[i] += mmio_size >> PAGE_SHIFT;
>
>     /*
>      * Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000.
> @@ -230,7 +233,8 @@ static int setup_guest(xc_interface *xch,
>         if ( ((count | cur_pfn) & (SUPERPAGE_1GB_NR_PFNS - 1)) == 0 &&
>              /* Check if there exists MMIO hole in the 1GB memory range */
>              !check_mmio_hole(cur_pfn << PAGE_SHIFT,
> -                              SUPERPAGE_1GB_NR_PFNS << PAGE_SHIFT) )
> +                              SUPERPAGE_1GB_NR_PFNS << PAGE_SHIFT,
> +                              mmio_start, mmio_size) )
>         {
>             long done;
>             unsigned long nr_extents = count >> SUPERPAGE_1GB_SHIFT;
> @@ -327,7 +331,7 @@ static int setup_guest(xc_interface *xch,
>               xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
>               HVM_INFO_PFN)) == NULL )
>         goto error_out;
> -    build_hvm_info(hvm_info_page, v_end);
> +    build_hvm_info(hvm_info_page, v_end, mmio_start, mmio_size);
>     munmap(hvm_info_page, PAGE_SIZE);
>
>     /* Allocate and clear special pages. */
> @@ -408,6 +412,9 @@ int xc_hvm_build(xc_interface *xch, uint32_t domid,
>     if ( params.mem_target == 0 )
>         params.mem_target = params.mem_size;
>
> +    if ( params.mmio_size == 0 )
> +        params.mmio_size = HVM_BELOW_4G_MMIO_LENGTH;
> +
>     /* An HVM guest must be initialised with at least 2MB memory. */
>     if ( params.mem_size < (2ull << 20) || params.mem_target < (2ull << 20) )
>         return -1;
> diff --git a/tools/libxc/xenguest.h b/tools/libxc/xenguest.h
> index 19b0382..b06788c 100644
> --- a/tools/libxc/xenguest.h
> +++ b/tools/libxc/xenguest.h
> @@ -174,6 +174,7 @@ int xc_linux_build_mem(xc_interface *xch,
>  struct xc_hvm_params {
>     uint64_t mem_size;           /**< Memory size in bytes. */
>     uint64_t mem_target;         /**< Memory target in bytes. */
> +    uint64_t mmio_size;          /**< Size of the MMIO hole in bytes. */
>     const char *image_file_name; /**< File name of the image to load. */
>  };
>
> --
> 1.7.2.5
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel
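
As a sanity check on the layout change, the relocation arithmetic in
build_hvm_info() works out as follows for, say, 4GB of guest RAM and a
1GB hole (illustrative numbers only; the snippet merely mirrors the
patch's arithmetic, it is not code from the patch):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mem_size   = 4ull << 30;                /* 4GB guest RAM */
        uint64_t mmio_size  = 1ull << 30;                /* 1GB MMIO hole */
        uint64_t mmio_start = (1ull << 32) - mmio_size;  /* hole base: 3GB */

        uint64_t lowmem_end  = mem_size;
        uint64_t highmem_end = 0;

        if ( lowmem_end > mmio_start )
        {
            /* RAM overlapping the hole is pushed above 4GB, exactly as in
             * the hunk touching build_hvm_info() above. */
            highmem_end = (1ull << 32) + (lowmem_end - mmio_start);
            lowmem_end  = mmio_start;
        }

        /* Prints lowmem_end=3072MB highmem_end=5120MB: 3GB stays below
         * the hole and the remaining 1GB is relocated to [4GB, 5GB). */
        printf("lowmem_end=%" PRIu64 "MB highmem_end=%" PRIu64 "MB\n",
               lowmem_end >> 20, highmem_end >> 20);
        return 0;
    }

The page_array loop in setup_guest() applies the same shift to the guest
physical frame numbers, so the populated RAM matches this layout.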

