Re: [Xen-devel] [v6][PATCH 4/7] libxc/hvm_info_table: introduce a new field nr_reserved_device_memory_map
> From: Chen, Tiejun
> Sent: Tuesday, September 09, 2014 10:50 PM
>
> In hvm_info_table this field represents the number of reserved device
> memory map entries. It is convenient to expose this information to the VM.
> While building the hvm info table, libxc is responsible for filling in
> this number after calling check_rdm_overlap().
Agree with Jan that putting the entry number here looks dirty. If we really
want to go this way, I would prefer to put the whole set of reserved entries
here, which would remove the call from hvmloader completely. But since that
is a dynamically sized structure, it is not a good option here either.
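To illustrate the sizing problem (a hypothetical sketch; the entry layout
below is made up, not code from this series):

    /* Hypothetical alternative: embed the entries rather than a count. */
    struct hvm_info_table_alt {
        /* ... existing fixed-size fields ... */
        uint32_t nr_reserved_device_memory_map;
        /* A flexible array member makes the table variably sized, so
         * the 'length' field and the byte-sum checksum would have to
         * cover a tail whose size is only known at build time, and
         * the whole table must still fit in the single page at
         * HVM_INFO_PFN/HVM_INFO_OFFSET. */
        struct {
            uint64_t start_pfn;   /* first frame of the reserved range */
            uint64_t nr_pages;    /* length of the range in frames */
        } rdm_map[];
    };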
>
> Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
>
> diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
> index 299e33a..8c61422 100644
> --- a/tools/libxc/xc_hvm_build_x86.c
> +++ b/tools/libxc/xc_hvm_build_x86.c
> @@ -89,7 +89,8 @@ static int modules_init(struct xc_hvm_build_args *args,
>  }
>
>  static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
> -                           uint64_t mmio_start, uint64_t mmio_size)
> +                           uint64_t mmio_start, uint64_t mmio_size,
> +                           unsigned int num)
>  {
>      struct hvm_info_table *hvm_info = (struct hvm_info_table *)
>          (((unsigned char *)hvm_info_page) + HVM_INFO_OFFSET);
> @@ -119,6 +120,9 @@ static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
>      hvm_info->high_mem_pgend = highmem_end >> PAGE_SHIFT;
>      hvm_info->reserved_mem_pgstart = ioreq_server_pfn(0);
>
> +    /* Reserved device memory map number. */
> +    hvm_info->nr_reserved_device_memory_map = num;
> +
>      /* Finish with the checksum. */
>      for ( i = 0, sum = 0; i < hvm_info->length; i++ )
>          sum += ((uint8_t *)hvm_info)[i];
> @@ -329,6 +333,7 @@ static int setup_guest(xc_interface *xch,
>      int claim_enabled = args->claim_enabled;
>      xen_pfn_t special_array[NR_SPECIAL_PAGES];
>      xen_pfn_t ioreq_server_array[NR_IOREQ_SERVER_PAGES];
> +    unsigned int num_reserved = 0;
>
>      if ( nr_pages > target_pages )
>          pod_mode = XENMEMF_populate_on_demand;
> @@ -371,6 +376,8 @@ static int setup_guest(xc_interface *xch,
>      if ( rc < 0 )
>          goto error_out;
>
> +    num_reserved = rc;
> +
>      for ( i = 0; i < nr_pages; i++ )
>          page_array[i] = i;
>      for ( i = mmio_start >> PAGE_SHIFT; i < nr_pages; i++ )
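Btw, since 'rc' here comes from check_rdm_overlap() introduced earlier in
the series, a short comment at the call site may help. The prototype is not
shown in this patch, so the shape below is only an assumption inferred from
this usage:

    /* Assumed shape, inferred from the call site above: a negative
     * return signals an error, a non-negative return is the number
     * of reserved device memory map entries. */
    int check_rdm_overlap(xc_interface *xch, uint64_t mmio_start,
                          uint64_t mmio_size);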
> @@ -540,7 +547,7 @@ static int setup_guest(xc_interface *xch,
>               xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
>               HVM_INFO_PFN)) == NULL )
>          goto error_out;
> -    build_hvm_info(hvm_info_page, v_end, mmio_start, mmio_size);
> +    build_hvm_info(hvm_info_page, v_end, mmio_start, mmio_size,
> +                   num_reserved);
>      munmap(hvm_info_page, PAGE_SIZE);
>
>      /* Allocate and clear special pages. */
> diff --git a/xen/include/public/hvm/hvm_info_table.h b/xen/include/public/hvm/hvm_info_table.h
> index 36085fa..bf401d5 100644
> --- a/xen/include/public/hvm/hvm_info_table.h
> +++ b/xen/include/public/hvm/hvm_info_table.h
> @@ -65,6 +65,9 @@ struct hvm_info_table {
>       */
>      uint32_t high_mem_pgend;
>
> +    /* How many reserved device memory maps do we have? */
> +    uint32_t nr_reserved_device_memory_map;
> +
>      /* Bitmap of which CPUs are online at boot time. */
>      uint8_t vcpu_online[(HVM_MAX_VCPUS + 7)/8];
>  };
> --
> 1.9.1
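For completeness, the consumer side in hvmloader then only needs this count.
A minimal sketch, assuming hvmloader's existing get_hvm_info_table() and
BUG_ON() helpers (this is not the series' actual hvmloader change):

    /* Fetch the new count after revalidating the table checksum: per
     * the convention finished in build_hvm_info() above, the byte sum
     * over 'length' bytes, including the checksum field itself, must
     * be zero. */
    static uint32_t get_nr_reserved_device_memory_map(void)
    {
        struct hvm_info_table *t = get_hvm_info_table();
        uint8_t sum = 0;
        uint32_t i;

        for ( i = 0; i < t->length; i++ )
            sum += ((uint8_t *)t)[i];
        BUG_ON(sum != 0);

        return t->nr_reserved_device_memory_map;
    }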