Re: [Xen-devel] [PATCH] libxc: introduce a per architecture scratch pfn for temporary grant mapping
On Tue, 2015-01-13 at 20:10 +0000, Julien Grall wrote:
> +int
> +xc_core_arch_get_scratch_gpfn(xc_interface *xch, domid_t domid,
> + xen_pfn_t *gpfn)
> +{
> + /*
> + * The Grant Table region space is not used until the guest is
> + * booting. Use the first page for the scrach pfn.
"scratch".
> + */
> + XC_BUILD_BUG_ON(GUEST_GNTTAB_SIZE < XC_PAGE_SIZE);
> +
> + *gpfn = GUEST_GNTTAB_BASE >> XC_PAGE_SHIFT;
> +
> + return 0;
> +}
> +
> +
> /*
> * Local variables:
> * mode: C
> diff --git a/tools/libxc/xc_core_x86.c b/tools/libxc/xc_core_x86.c
> index f05060a..b157d85 100644
> --- a/tools/libxc/xc_core_x86.c
> +++ b/tools/libxc/xc_core_x86.c
> @@ -205,6 +205,23 @@ xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width, xc_do
> return xc_core_arch_map_p2m_rw(xch, dinfo, info,
> live_shinfo, live_p2m, pfnp, 1);
> }
> +
> +int
> +xc_core_arch_get_scratch_gpfn(xc_interface *xch, domid_t domid,
> + xen_pfn_t *gpfn)
> +{
> + int rc;
> +
> + rc = xc_domain_maximum_gpfn(xch, domid);
> +
> + if ( rc <= 0 )
> + return rc;
> +
> + *gpfn = rc;
Shouldn't this be rc + 1 to match the old behaviour?
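i.e. the old code below uses max_gfn + 1 as the scratch pfn, so to keep
that behaviour I'd expect something along these lines (just an untested
sketch of what I mean, not a tested change):

    int
    xc_core_arch_get_scratch_gpfn(xc_interface *xch, domid_t domid,
                                  xen_pfn_t *gpfn)
    {
        int rc;

        /* Highest populated gpfn of the domain, or <= 0 on failure. */
        rc = xc_domain_maximum_gpfn(xch, domid);
        if ( rc <= 0 )
            return rc;

        /* One past the highest gpfn, which is what the old callers used. */
        *gpfn = (xen_pfn_t)rc + 1;

        return 0;
    }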
> +
> + return 0;
> +}
> +
> /*
> * Local variables:
> * mode: C
> diff --git a/tools/libxc/xc_dom_boot.c b/tools/libxc/xc_dom_boot.c
> index f0a1c64..a141eb5 100644
> --- a/tools/libxc/xc_dom_boot.c
> +++ b/tools/libxc/xc_dom_boot.c
> @@ -33,6 +33,7 @@
>
> #include "xg_private.h"
> #include "xc_dom.h"
> +#include "xc_core.h"
> #include <xen/hvm/params.h>
> #include <xen/grant_table.h>
>
> @@ -365,7 +366,7 @@ int xc_dom_gnttab_hvm_seed(xc_interface *xch, domid_t domid,
> domid_t xenstore_domid)
> {
> int rc;
> - xen_pfn_t max_gfn;
> + xen_pfn_t scratch_gpfn;
> struct xen_add_to_physmap xatp = {
> .domid = domid,
> .space = XENMAPSPACE_grant_table,
> @@ -375,16 +376,21 @@ int xc_dom_gnttab_hvm_seed(xc_interface *xch, domid_t domid,
> .domid = domid,
> };
>
> - max_gfn = xc_domain_maximum_gpfn(xch, domid);
> - if ( max_gfn <= 0 ) {
> + rc = xc_core_arch_get_scratch_gpfn(xch, domid, &scratch_gpfn);
> + if ( rc < 0 )
> + {
> xc_dom_panic(xch, XC_INTERNAL_ERROR,
> - "%s: failed to get max gfn "
> + "%s: failed to get a scratch gfn "
> "[errno=%d]\n",
> __FUNCTION__, errno);
> return -1;
> }
> - xatp.gpfn = max_gfn + 1;
> - xrfp.gpfn = max_gfn + 1;
> + xatp.gpfn = scratch_gpfn;
> + xrfp.gpfn = scratch_gpfn;
> +
> + xc_dom_printf(xch, "%s: called, pfn=0x%"PRI_xen_pfn, __FUNCTION__,
> + scratch_gpfn);
> +
>
> rc = do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
> if ( rc != 0 )
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel