Re: [Xen-devel] [PATCH v2 23/23] libxc/xc_dom_core: Copy ACPI tables to guest space
On 2016/8/15 20:49, Boris Ostrovsky wrote:
> On 08/15/2016 03:48 AM, Shannon Zhao wrote:
>> Hi Boris
>>
>> On 2016/8/5 5:06, Boris Ostrovsky wrote:
>>> Load ACPI modules into guest space
>>>
>>> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
>>> ---
>>> v2:
>>> * New patch, loosely based on Shannon's ARM patch
>>>
>>>  tools/libxc/xc_dom_core.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++
>>> 1 file changed, 92 insertions(+)
>>>
>>> diff --git a/tools/libxc/xc_dom_core.c b/tools/libxc/xc_dom_core.c
>>> index ebada89..00d870f 100644
>>> --- a/tools/libxc/xc_dom_core.c
>>> +++ b/tools/libxc/xc_dom_core.c
>>> @@ -1040,6 +1040,94 @@ static int xc_dom_build_ramdisk(struct xc_dom_image *dom)
>>>      return -1;
>>>  }
>>>
>>> +static int populate_acpi_pages(struct xc_dom_image *dom,
>>> +                               xen_pfn_t *extents,
>>> +                               unsigned int num_pages)
>>> +{
>>> +    int rc;
>>> +    xc_interface *xch = dom->xch;
>>> +    uint32_t domid = dom->guest_domid;
>>> +    unsigned long idx, first_high_idx = (1ull << (32 - 12));
>>> +
>>> +    for ( ; num_pages; num_pages--, extents++ )
>>> +    {
>>> +
>>> +        if ( xc_domain_populate_physmap(xch, domid, 1, 0, 0, extents) == 1 )
>>> +            continue;
>>> +
>>> +        if (dom->highmem_end)
>>> +        {
>>> +            idx = --dom->highmem_end;
>>> +            if ( idx == first_high_idx )
>>> +                dom->highmem_end = 0;
>>> +        }
>>> +        else
>>> +            idx = --dom->lowmem_end;
>>> +
>>> +        rc = xc_domain_add_to_physmap(xch, domid,
>>> +                                      XENMAPSPACE_gmfn,
>>> +                                      idx, *extents);
>>> +        if (rc)
>>> +            return rc;
>>> +    }
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +static int xc_dom_load_acpi(struct xc_dom_image *dom)
>>> +{
>>> +    int j, i = 0;
>>> +    unsigned num_pages;
>>> +    xen_pfn_t *extents, base;
>>> +    void *ptr;
>>> +
>>> +    while ( (i < MAX_ACPI_MODULES) && dom->acpi_modules[i].length )
>>> +    {
>>> +        DOMPRINTF("%s: %d bytes at address %lx\n", __FUNCTION__,
>>> +                  dom->acpi_modules[i].length,
>>> +                  dom->acpi_modules[i].guest_addr_out);
>>> +
>>> +        num_pages = (dom->acpi_modules[i].length + (XC_PAGE_SIZE - 1)) >>
>>> +                    XC_PAGE_SHIFT;
>>> +        extents = malloc(num_pages * sizeof(*extents));
>>> +        if ( !extents )
>>> +        {
>>> +            DOMPRINTF("%s: Out of memory", __FUNCTION__);
>>> +            goto err;
>>> +        }
>>> +
>>> +        base = dom->acpi_modules[i].guest_addr_out >> XC_PAGE_SHIFT;
>>> +        for (j=0; j<num_pages; j++)
>>> +            extents[j] = base + j;
>>> +        if ( populate_acpi_pages(dom, extents, num_pages) )
>>> +        {
>>> +            DOMPRINTF("%s: Can't populate ACPI pages", __FUNCTION__);
>>> +            goto err;
>>> +        }
>>> +
>>> +        ptr = xc_map_foreign_range(dom->xch, dom->guest_domid,
>>> +                                   XC_PAGE_SIZE * num_pages,
>>> +                                   PROT_READ | PROT_WRITE, base);
>>> +        if ( !ptr )
>>> +        {
>>> +            DOMPRINTF("%s: Can't map %d pages at 0x%lx",
>>> +                      __FUNCTION__, num_pages, base);
>>> +            goto err;
>>> +        }
>>> +
>>> +        memcpy(ptr, dom->acpi_modules[i].data,
>>> +               dom->acpi_modules[i].length);
>>> +
>>> +        free(extents);
>>> +        i++;
>>> +    }
>>> +
>>> +    return 0;
>>> +
>>> + err:
>>> +    free(extents);
>>> +    return -1;
>>> +}
>>> +
>>>  int xc_dom_build_image(struct xc_dom_image *dom)
>>>  {
>>>      unsigned int page_size;
>>> @@ -1097,6 +1185,10 @@ int xc_dom_build_image(struct xc_dom_image *dom)
>>>          memcpy(devicetreemap, dom->devicetree_blob, dom->devicetree_size);
>>>      }
>>>
>>> +    /* load ACPI tables */
>>> +    if ( xc_dom_load_acpi(dom) != 0 )
>>> +        goto err;
>> I think the definition of xc_dom_load_acpi() needs to move to
>> xc_dom_x86.c, while I will move the corresponding ARM one to xc_dom_arm.c.
>
>
> You don't think both x86 and ARM can use the same definition? This
> looks very similar to what you had in your series.
>
So in my series I would have to use dom->acpi_modules[i] instead of
dom->acpitable_blob. I think it's fine to reuse what x86 uses.
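
For illustration, a rough sketch of the ARM-side glue I have in mind, so the
generic xc_dom_load_acpi() above can be shared. The helper name
xc_dom_arm_set_acpi() and the GUEST_ACPI_BASE constant are only assumptions
for this sketch, not something in this patch:

    /*
     * Hypothetical ARM-side setup: describe the ACPI blob as
     * acpi_modules[0] so the generic xc_dom_load_acpi() can populate,
     * map and copy it, instead of a separate acpitable_blob loader.
     * GUEST_ACPI_BASE is assumed to come from the ARM guest layout.
     */
    static int xc_dom_arm_set_acpi(struct xc_dom_image *dom,
                                   void *blob, size_t size)
    {
        if ( blob == NULL || size == 0 )
            return -1;

        dom->acpi_modules[0].data           = blob;
        dom->acpi_modules[0].length         = size;
        dom->acpi_modules[0].guest_addr_out = GUEST_ACPI_BASE;

        return 0;
    }

The caller would only need to invoke this before xc_dom_build_image(), and
the rest of the loading path stays common between x86 and ARM.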
Thanks,
--
Shannon
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel