Re: [Xen-devel] [for-4.11][PATCH v7 10/16] xen/mm: Switch map_pages_to_xen to use MFN typesafe
On Tue, 3 Apr 2018, Julien Grall wrote:
> The current prototype is slightly confusing because it takes a virtual
> address and a physical frame (not an address!). Switching to MFN will
> improve safety and reduce the chance of mistakenly inverting the two
> parameters.
>
> Also, take the opportunity to switch (a - b) >> PAGE_SHIFT to
> PFN_DOWN(a - b) in the code modified.
>
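(For context, a minimal, self-contained sketch of the type-safety idea behind mfn_t and PFN_DOWN that the commit message relies on. The definitions below are simplified stand-ins for illustration only, not the real ones from xen/include/xen/typesafe.h or xen/include/xen/mm.h; only the shape of map_pages_to_xen matches the patch.)

/*
 * Sketch: wrapping the frame number in a struct lets the compiler reject a
 * plain unsigned long (e.g. a virtual address) passed where an MFN is
 * expected.  Simplified stand-in definitions, not Xen's real headers.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)   /* byte address/size -> frame count */

typedef struct { unsigned long mfn; } mfn_t;
#define _mfn(n)  ((mfn_t){ n })
#define mfn_x(m) ((m).mfn)

static int map_pages_to_xen(unsigned long virt, mfn_t mfn,
                            unsigned long nr_mfns, unsigned int flags)
{
    printf("map virt %#lx -> mfn %#lx, %lu page(s), flags %#x\n",
           virt, mfn_x(mfn), nr_mfns, flags);
    return 0;
}

int main(void)
{
    unsigned long virt = 0xffc00000UL, paddr = 0x200000UL, size = 0x4000UL;

    /* With the old prototype, swapping virt and the frame number still
     * compiled; with mfn_t the mismatch is a compile-time error. */
    return map_pages_to_xen(virt, _mfn(PFN_DOWN(paddr)), PFN_DOWN(size), 0);
}

With the wrapper struct, passing the virtual address where the MFN belongs becomes a compile error rather than a silent bug, which is what the commit message argues for.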
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
> Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
> Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> ---
>
> Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> Cc: Julien Grall <julien.grall@xxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
> Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> Cc: Tim Deegan <tim@xxxxxxx>
> Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
> Cc: Gang Wei <gang.wei@xxxxxxxxx>
> Cc: Shane Wang <shane.wang@xxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
>
> Changes in v6:
> - Add Andrew's acked-by
> - Add Wei's and George's reviewed-by
>
> Changes in v5:
> - Use PFN_DOWN as suggested by Jan
> - Replace _mfn(0) by INVALID_MFN where relevant
>
> Changes in v4:
> - Patch added
> ---
> xen/arch/arm/mm.c | 4 +--
> xen/arch/x86/mm.c | 58 +++++++++++++++++++-------------------
> xen/arch/x86/setup.c | 20 ++++++-------
> xen/arch/x86/smpboot.c | 2 +-
> xen/arch/x86/tboot.c | 11 ++++----
> xen/arch/x86/x86_64/mm.c | 27 ++++++++++--------
> xen/arch/x86/x86_64/mmconfig_64.c | 6 ++--
> xen/common/efi/boot.c | 2 +-
> xen/common/vmap.c | 10 +++++--
> xen/drivers/acpi/apei/erst.c | 2 +-
> xen/drivers/acpi/apei/hest.c | 2 +-
> xen/drivers/passthrough/vtd/dmar.c | 2 +-
> xen/include/asm-arm/mm.h | 2 +-
> xen/include/xen/mm.h | 2 +-
> 14 files changed, 80 insertions(+), 70 deletions(-)
>
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 436df6936b..7af6baa3d6 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -1065,11 +1065,11 @@ out:
> }
>
> int map_pages_to_xen(unsigned long virt,
> - unsigned long mfn,
> + mfn_t mfn,
> unsigned long nr_mfns,
> unsigned int flags)
> {
> - return create_xen_entries(INSERT, virt, _mfn(mfn), nr_mfns, flags);
> + return create_xen_entries(INSERT, virt, mfn, nr_mfns, flags);
> }
>
> int populate_pt_range(unsigned long virt, unsigned long nr_mfns)
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 6d5f40482e..ec61887d76 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -213,7 +213,7 @@ static void __init init_frametable_chunk(void *start, void *end)
> while ( step && s + (step << PAGE_SHIFT) > e + (4 << PAGE_SHIFT) )
> step >>= PAGETABLE_ORDER;
> mfn = alloc_boot_pages(step, step);
> - map_pages_to_xen(s, mfn_x(mfn), step, PAGE_HYPERVISOR);
> + map_pages_to_xen(s, mfn, step, PAGE_HYPERVISOR);
> }
>
> memset(start, 0, end - start);
> @@ -787,12 +787,12 @@ static int update_xen_mappings(unsigned long mfn, unsigned int cacheattr)
> XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
>
> if ( unlikely(alias) && cacheattr )
> - err = map_pages_to_xen(xen_va, mfn, 1, 0);
> + err = map_pages_to_xen(xen_va, _mfn(mfn), 1, 0);
> if ( !err )
> - err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
> + err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), _mfn(mfn), 1,
> PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
> if ( unlikely(alias) && !cacheattr && !err )
> - err = map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
> + err = map_pages_to_xen(xen_va, _mfn(mfn), 1, PAGE_HYPERVISOR);
> return err;
> }
>
> @@ -4645,7 +4645,7 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
>
> int map_pages_to_xen(
> unsigned long virt,
> - unsigned long mfn,
> + mfn_t mfn,
> unsigned long nr_mfns,
> unsigned int flags)
> {
> @@ -4677,13 +4677,13 @@ int map_pages_to_xen(
> ol3e = *pl3e;
>
> if ( cpu_has_page1gb &&
> - !(((virt >> PAGE_SHIFT) | mfn) &
> + !(((virt >> PAGE_SHIFT) | mfn_x(mfn)) &
> ((1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1)) &&
> nr_mfns >= (1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) &&
> !(flags & (_PAGE_PAT | MAP_SMALL_PAGES)) )
> {
> /* 1GB-page mapping. */
> - l3e_write_atomic(pl3e, l3e_from_pfn(mfn, l1f_to_lNf(flags)));
> + l3e_write_atomic(pl3e, l3e_from_mfn(mfn, l1f_to_lNf(flags)));
>
> if ( (l3e_get_flags(ol3e) & _PAGE_PRESENT) )
> {
> @@ -4727,8 +4727,8 @@ int map_pages_to_xen(
> }
>
> virt += 1UL << L3_PAGETABLE_SHIFT;
> - if ( !mfn_eq(_mfn(mfn), INVALID_MFN) )
> - mfn += 1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
> + if ( !mfn_eq(mfn, INVALID_MFN) )
> + mfn = mfn_add(mfn, 1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT));
> nr_mfns -= 1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
> continue;
> }
> @@ -4743,18 +4743,18 @@ int map_pages_to_xen(
> if ( ((l3e_get_pfn(ol3e) & ~(L2_PAGETABLE_ENTRIES *
> L1_PAGETABLE_ENTRIES - 1)) +
> (l2_table_offset(virt) << PAGETABLE_ORDER) +
> - l1_table_offset(virt) == mfn) &&
> + l1_table_offset(virt) == mfn_x(mfn)) &&
> ((lNf_to_l1f(l3e_get_flags(ol3e)) ^ flags) &
> ~(_PAGE_ACCESSED|_PAGE_DIRTY)) == 0 )
> {
> /* We can skip to end of L3 superpage if we got a match. */
> i = (1u << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) -
> - (mfn & ((1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1));
> + (mfn_x(mfn) & ((1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1));
> if ( i > nr_mfns )
> i = nr_mfns;
> virt += i << PAGE_SHIFT;
> - if ( !mfn_eq(_mfn(mfn), INVALID_MFN) )
> - mfn += i;
> + if ( !mfn_eq(mfn, INVALID_MFN) )
> + mfn = mfn_add(mfn, i);
> nr_mfns -= i;
> continue;
> }
> @@ -4792,14 +4792,14 @@ int map_pages_to_xen(
> if ( !pl2e )
> return -ENOMEM;
>
> - if ( ((((virt >> PAGE_SHIFT) | mfn) &
> + if ( ((((virt >> PAGE_SHIFT) | mfn_x(mfn)) &
> ((1u << PAGETABLE_ORDER) - 1)) == 0) &&
> (nr_mfns >= (1u << PAGETABLE_ORDER)) &&
> !(flags & (_PAGE_PAT|MAP_SMALL_PAGES)) )
> {
> /* Super-page mapping. */
> ol2e = *pl2e;
> - l2e_write_atomic(pl2e, l2e_from_pfn(mfn, l1f_to_lNf(flags)));
> + l2e_write_atomic(pl2e, l2e_from_mfn(mfn, l1f_to_lNf(flags)));
>
> if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
> {
> @@ -4822,8 +4822,8 @@ int map_pages_to_xen(
> }
>
> virt += 1UL << L2_PAGETABLE_SHIFT;
> - if ( !mfn_eq(_mfn(mfn), INVALID_MFN) )
> - mfn += 1UL << PAGETABLE_ORDER;
> + if ( !mfn_eq(mfn, INVALID_MFN) )
> + mfn = mfn_add(mfn, 1UL << PAGETABLE_ORDER);
> nr_mfns -= 1UL << PAGETABLE_ORDER;
> }
> else
> @@ -4842,18 +4842,18 @@ int map_pages_to_xen(
>
> /* Skip this PTE if there is no change. */
> if ( (((l2e_get_pfn(*pl2e) & ~(L1_PAGETABLE_ENTRIES - 1)) +
> - l1_table_offset(virt)) == mfn) &&
> + l1_table_offset(virt)) == mfn_x(mfn)) &&
> (((lNf_to_l1f(l2e_get_flags(*pl2e)) ^ flags) &
> ~(_PAGE_ACCESSED|_PAGE_DIRTY)) == 0) )
> {
> /* We can skip to end of L2 superpage if we got a match. */
> i = (1u << (L2_PAGETABLE_SHIFT - PAGE_SHIFT)) -
> - (mfn & ((1u << (L2_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1));
> + (mfn_x(mfn) & ((1u << (L2_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1));
> if ( i > nr_mfns )
> i = nr_mfns;
> virt += i << L1_PAGETABLE_SHIFT;
> - if ( !mfn_eq(_mfn(mfn), INVALID_MFN) )
> - mfn += i;
> + if ( !mfn_eq(mfn, INVALID_MFN) )
> + mfn = mfn_add(mfn, i);
> nr_mfns -= i;
> goto check_l3;
> }
> @@ -4888,7 +4888,7 @@ int map_pages_to_xen(
>
> pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
> ol1e = *pl1e;
> - l1e_write_atomic(pl1e, l1e_from_pfn(mfn, flags));
> + l1e_write_atomic(pl1e, l1e_from_mfn(mfn, flags));
> if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
> {
> unsigned int flush_flags = FLUSH_TLB | FLUSH_ORDER(0);
> @@ -4898,13 +4898,13 @@ int map_pages_to_xen(
> }
>
> virt += 1UL << L1_PAGETABLE_SHIFT;
> - if ( !mfn_eq(_mfn(mfn), INVALID_MFN) )
> - mfn += 1UL;
> + if ( !mfn_eq(mfn, INVALID_MFN) )
> + mfn = mfn_add(mfn, 1UL);
> nr_mfns -= 1UL;
>
> if ( (flags == PAGE_HYPERVISOR) &&
> ((nr_mfns == 0) ||
> - ((((virt >> PAGE_SHIFT) | mfn) &
> + ((((virt >> PAGE_SHIFT) | mfn_x(mfn)) &
> ((1u << PAGETABLE_ORDER) - 1)) == 0)) )
> {
> unsigned long base_mfn;
> @@ -4957,7 +4957,7 @@ int map_pages_to_xen(
> if ( cpu_has_page1gb &&
> (flags == PAGE_HYPERVISOR) &&
> ((nr_mfns == 0) ||
> - !(((virt >> PAGE_SHIFT) | mfn) &
> + !(((virt >> PAGE_SHIFT) | mfn_x(mfn)) &
> ((1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1))) )
> {
> unsigned long base_mfn;
> @@ -5009,7 +5009,7 @@ int map_pages_to_xen(
>
> int populate_pt_range(unsigned long virt, unsigned long nr_mfns)
> {
> - return map_pages_to_xen(virt, mfn_x(INVALID_MFN), nr_mfns, MAP_SMALL_PAGES);
> + return map_pages_to_xen(virt, INVALID_MFN, nr_mfns, MAP_SMALL_PAGES);
> }
>
> /*
> @@ -5270,7 +5270,7 @@ void __set_fixmap(
> enum fixed_addresses idx, unsigned long mfn, unsigned long flags)
> {
> BUG_ON(idx >= __end_of_fixed_addresses);
> - map_pages_to_xen(__fix_to_virt(idx), mfn, 1, flags);
> + map_pages_to_xen(__fix_to_virt(idx), _mfn(mfn), 1, flags);
> }
>
> void *__init arch_vmap_virt_end(void)
> @@ -5541,7 +5541,7 @@ static void __memguard_change_range(void *p, unsigned long l, int guard)
> if ( guard )
> flags &= ~_PAGE_PRESENT;
>
> - map_pages_to_xen(_p, mfn_x(virt_to_mfn(p)), PFN_DOWN(_l), flags);
> + map_pages_to_xen(_p, virt_to_mfn(p), PFN_DOWN(_l), flags);
> }
>
> void memguard_guard_range(void *p, unsigned long l)
> diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
> index c0b97a748a..b79ccab49d 100644
> --- a/xen/arch/x86/setup.c
> +++ b/xen/arch/x86/setup.c
> @@ -354,8 +354,8 @@ void *__init bootstrap_map(const module_t *mod)
> if ( end - start > BOOTSTRAP_MAP_LIMIT - map_cur )
> return NULL;
>
> - map_pages_to_xen(map_cur, start >> PAGE_SHIFT,
> - (end - start) >> PAGE_SHIFT, PAGE_HYPERVISOR);
> + map_pages_to_xen(map_cur, maddr_to_mfn(start),
> + PFN_DOWN(end - start), PAGE_HYPERVISOR);
> map_cur += end - start;
> return ret;
> }
> @@ -979,8 +979,8 @@ void __init noreturn __start_xen(unsigned long mbi_p)
> {
> end = min(e, limit);
> set_pdx_range(s >> PAGE_SHIFT, end >> PAGE_SHIFT);
> - map_pages_to_xen((unsigned long)__va(s), s >> PAGE_SHIFT,
> - (end - s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
> + map_pages_to_xen((unsigned long)__va(s), maddr_to_mfn(s),
> + PFN_DOWN(end - s), PAGE_HYPERVISOR);
> }
>
> if ( e > min(HYPERVISOR_VIRT_END - DIRECTMAP_VIRT_START,
> @@ -1294,7 +1294,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
>
> if ( map_e < end )
> {
> - map_pages_to_xen((unsigned long)__va(map_e), PFN_DOWN(map_e),
> + map_pages_to_xen((unsigned long)__va(map_e), maddr_to_mfn(map_e),
> PFN_DOWN(end - map_e), PAGE_HYPERVISOR);
> init_boot_pages(map_e, end);
> map_e = end;
> @@ -1304,13 +1304,13 @@ void __init noreturn __start_xen(unsigned long mbi_p)
> {
> /* This range must not be passed to the boot allocator and
> * must also not be mapped with _PAGE_GLOBAL. */
> - map_pages_to_xen((unsigned long)__va(map_e), PFN_DOWN(map_e),
> + map_pages_to_xen((unsigned long)__va(map_e), maddr_to_mfn(map_e),
> PFN_DOWN(e - map_e), __PAGE_HYPERVISOR_RW);
> }
> if ( s < map_s )
> {
> - map_pages_to_xen((unsigned long)__va(s), s >> PAGE_SHIFT,
> - (map_s - s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
> + map_pages_to_xen((unsigned long)__va(s), maddr_to_mfn(s),
> + PFN_DOWN(map_s - s), PAGE_HYPERVISOR);
> init_boot_pages(s, map_s);
> }
> }
> @@ -1320,7 +1320,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
> set_pdx_range(mod[i].mod_start,
> mod[i].mod_start + PFN_UP(mod[i].mod_end));
> map_pages_to_xen((unsigned long)mfn_to_virt(mod[i].mod_start),
> - mod[i].mod_start,
> + _mfn(mod[i].mod_start),
> PFN_UP(mod[i].mod_end), PAGE_HYPERVISOR);
> }
>
> @@ -1333,7 +1333,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
>
> if ( e > s )
> map_pages_to_xen((unsigned long)__va(kexec_crash_area.start),
> - s, e - s, PAGE_HYPERVISOR);
> + _mfn(s), e - s, PAGE_HYPERVISOR);
> }
> #endif
>
> diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
> index 98873df429..80549ad925 100644
> --- a/xen/arch/x86/smpboot.c
> +++ b/xen/arch/x86/smpboot.c
> @@ -623,7 +623,7 @@ unsigned long alloc_stub_page(unsigned int cpu, unsigned long *mfn)
> }
>
> stub_va = XEN_VIRT_END - (cpu + 1) * PAGE_SIZE;
> - if ( map_pages_to_xen(stub_va, mfn_x(page_to_mfn(pg)), 1,
> + if ( map_pages_to_xen(stub_va, page_to_mfn(pg), 1,
> PAGE_HYPERVISOR_RX | MAP_SMALL_PAGES) )
> {
> if ( !*mfn )
> diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
> index d36bf33407..71e757c553 100644
> --- a/xen/arch/x86/tboot.c
> +++ b/xen/arch/x86/tboot.c
> @@ -336,22 +336,23 @@ static void tboot_gen_frametable_integrity(const uint8_t key[TB_KEY_SIZE],
>
> void tboot_shutdown(uint32_t shutdown_type)
> {
> - uint32_t map_base, map_size;
> + mfn_t map_base;
> + uint32_t map_size;
> int err;
>
> g_tboot_shared->shutdown_type = shutdown_type;
>
> /* Create identity map for tboot shutdown code. */
> /* do before S3 integrity because mapping tboot may change xenheap */
> - map_base = PFN_DOWN(g_tboot_shared->tboot_base);
> + map_base = maddr_to_mfn(g_tboot_shared->tboot_base);
> map_size = PFN_UP(g_tboot_shared->tboot_size);
>
> - err = map_pages_to_xen(map_base << PAGE_SHIFT, map_base, map_size,
> + err = map_pages_to_xen(mfn_to_maddr(map_base), map_base, map_size,
> __PAGE_HYPERVISOR);
> if ( err != 0 )
> {
> - printk("error (%#x) mapping tboot pages (mfns) @ %#x, %#x\n", err,
> - map_base, map_size);
> + printk("error (%#x) mapping tboot pages (mfns) @ %"PRI_mfn", %#x\n",
> + err, mfn_x(map_base), map_size);
> return;
> }
>
> diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
> index 1c83de0451..f6dd95aa47 100644
> --- a/xen/arch/x86/x86_64/mm.c
> +++ b/xen/arch/x86/x86_64/mm.c
> @@ -40,6 +40,10 @@ asm(".file \"" __FILE__ "\"");
> #include <asm/mem_sharing.h>
> #include <public/memory.h>
>
> +/* Override macros from asm/page.h to make them work with mfn_t */
> +#undef page_to_mfn
> +#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> +
> unsigned int __read_mostly m2p_compat_vstart =
> __HYPERVISOR_COMPAT_VIRT_START;
>
> l2_pgentry_t *compat_idle_pg_table_l2;
> @@ -111,14 +115,14 @@ static int hotadd_mem_valid(unsigned long pfn, struct mem_hotadd_info *info)
> return (pfn < info->epfn && pfn >= info->spfn);
> }
>
> -static unsigned long alloc_hotadd_mfn(struct mem_hotadd_info *info)
> +static mfn_t alloc_hotadd_mfn(struct mem_hotadd_info *info)
> {
> - unsigned mfn;
> + mfn_t mfn;
>
> ASSERT((info->cur + ( 1UL << PAGETABLE_ORDER) < info->epfn) &&
> info->cur >= info->spfn);
>
> - mfn = info->cur;
> + mfn = _mfn(info->cur);
> info->cur += (1UL << PAGETABLE_ORDER);
> return mfn;
> }
> @@ -317,7 +321,8 @@ static void destroy_m2p_mapping(struct mem_hotadd_info *info)
> */
> static int setup_compat_m2p_table(struct mem_hotadd_info *info)
> {
> - unsigned long i, va, smap, emap, rwva, epfn = info->epfn, mfn;
> + unsigned long i, va, smap, emap, rwva, epfn = info->epfn;
> + mfn_t mfn;
> unsigned int n;
> l3_pgentry_t *l3_ro_mpt = NULL;
> l2_pgentry_t *l2_ro_mpt = NULL;
> @@ -378,7 +383,7 @@ static int setup_compat_m2p_table(struct mem_hotadd_info *info)
> memset((void *)rwva, 0xFF, 1UL << L2_PAGETABLE_SHIFT);
> /* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. */
> l2e_write(&l2_ro_mpt[l2_table_offset(va)],
> - l2e_from_pfn(mfn, _PAGE_PSE|_PAGE_PRESENT));
> + l2e_from_mfn(mfn, _PAGE_PSE|_PAGE_PRESENT));
> }
> #undef CNT
> #undef MFN
> @@ -438,7 +443,7 @@ static int setup_m2p_table(struct mem_hotadd_info *info)
> break;
> if ( n < CNT )
> {
> - unsigned long mfn = alloc_hotadd_mfn(info);
> + mfn_t mfn = alloc_hotadd_mfn(info);
>
> ret = map_pages_to_xen(
> RDWR_MPT_VIRT_START + i * sizeof(unsigned long),
> @@ -473,7 +478,7 @@ static int setup_m2p_table(struct mem_hotadd_info *info)
> }
>
> /* NB. Cannot be GLOBAL: guest user mode should not see it. */
> - l2e_write(l2_ro_mpt, l2e_from_pfn(mfn,
> + l2e_write(l2_ro_mpt, l2e_from_mfn(mfn,
> /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
> }
> if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) )
> @@ -692,7 +697,7 @@ void __init zap_low_mappings(void)
> flush_local(FLUSH_TLB_GLOBAL);
>
> /* Replace with mapping of the boot trampoline only. */
> - map_pages_to_xen(trampoline_phys, trampoline_phys >> PAGE_SHIFT,
> + map_pages_to_xen(trampoline_phys, maddr_to_mfn(trampoline_phys),
> PFN_UP(trampoline_end - trampoline_start),
> __PAGE_HYPERVISOR);
> }
> @@ -769,7 +774,7 @@ static int setup_frametable_chunk(void *start, void *end,
> {
> unsigned long s = (unsigned long)start;
> unsigned long e = (unsigned long)end;
> - unsigned long mfn;
> + mfn_t mfn;
> int err;
>
> ASSERT(!(s & ((1 << L2_PAGETABLE_SHIFT) - 1)));
> @@ -1364,7 +1369,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
> i = virt_to_mfn(HYPERVISOR_VIRT_END - 1) + 1;
> if ( spfn < i )
> {
> - ret = map_pages_to_xen((unsigned long)mfn_to_virt(spfn), spfn,
> + ret = map_pages_to_xen((unsigned long)mfn_to_virt(spfn), _mfn(spfn),
> min(epfn, i) - spfn, PAGE_HYPERVISOR);
> if ( ret )
> goto destroy_directmap;
> @@ -1373,7 +1378,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
> {
> if ( i < spfn )
> i = spfn;
> - ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), i,
> + ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), _mfn(i),
> epfn - i, __PAGE_HYPERVISOR_RW);
> if ( ret )
> goto destroy_directmap;
> diff --git a/xen/arch/x86/x86_64/mmconfig_64.c b/xen/arch/x86/x86_64/mmconfig_64.c
> index 958b6cf2f4..2b3085931e 100644
> --- a/xen/arch/x86/x86_64/mmconfig_64.c
> +++ b/xen/arch/x86/x86_64/mmconfig_64.c
> @@ -125,9 +125,9 @@ static void __iomem *mcfg_ioremap(const struct acpi_mcfg_allocation *cfg,
> return NULL;
>
> if (map_pages_to_xen(virt,
> - (cfg->address >> PAGE_SHIFT) +
> - (cfg->start_bus_number << (20 - PAGE_SHIFT)),
> - size >> PAGE_SHIFT, prot))
> + mfn_add(maddr_to_mfn(cfg->address),
> + (cfg->start_bus_number << (20 - PAGE_SHIFT))),
> + PFN_DOWN(size), prot))
> return NULL;
>
> return (void __iomem *) virt;
> diff --git a/xen/common/efi/boot.c b/xen/common/efi/boot.c
> index 469bf980cc..64d12685d3 100644
> --- a/xen/common/efi/boot.c
> +++ b/xen/common/efi/boot.c
> @@ -1464,7 +1464,7 @@ void __init efi_init_memory(void)
> if ( (unsigned long)mfn_to_virt(emfn - 1) >= HYPERVISOR_VIRT_END )
> prot &= ~_PAGE_GLOBAL;
> if ( map_pages_to_xen((unsigned long)mfn_to_virt(smfn),
> - smfn, emfn - smfn, prot) == 0 )
> + _mfn(smfn), emfn - smfn, prot) == 0 )
> desc->VirtualStart =
> (unsigned long)maddr_to_virt(desc->PhysicalStart);
> else
> diff --git a/xen/common/vmap.c b/xen/common/vmap.c
> index 11785ffb0a..04f5db386d 100644
> --- a/xen/common/vmap.c
> +++ b/xen/common/vmap.c
> @@ -9,6 +9,10 @@
> #include <xen/vmap.h>
> #include <asm/page.h>
>
> +/* Override macros from asm/page.h to make them work with mfn_t */
> +#undef page_to_mfn
> +#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
> +
> static DEFINE_SPINLOCK(vm_lock);
> static void *__read_mostly vm_base[VMAP_REGION_NR];
> #define vm_bitmap(x) ((unsigned long *)vm_base[x])
> @@ -208,7 +212,7 @@ void *__vmap(const mfn_t *mfn, unsigned int granularity,
>
> for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
> {
> - if ( map_pages_to_xen(cur, mfn_x(*mfn), granularity, flags) )
> + if ( map_pages_to_xen(cur, *mfn, granularity, flags) )
> {
> vunmap(va);
> va = NULL;
> @@ -234,7 +238,7 @@ void vunmap(const void *va)
> #ifndef _PAGE_NONE
> destroy_xen_mappings(addr, addr + PAGE_SIZE * pages);
> #else /* Avoid tearing down intermediate page tables. */
> - map_pages_to_xen(addr, 0, pages, _PAGE_NONE);
> + map_pages_to_xen(addr, INVALID_MFN, pages, _PAGE_NONE);
> #endif
> vm_free(va);
> }
> @@ -258,7 +262,7 @@ static void *vmalloc_type(size_t size, enum vmap_region type)
> pg = alloc_domheap_page(NULL, 0);
> if ( pg == NULL )
> goto error;
> - mfn[i] = _mfn(page_to_mfn(pg));
> + mfn[i] = page_to_mfn(pg);
> }
>
> va = __vmap(mfn, 1, pages, 1, PAGE_HYPERVISOR, type);
> diff --git a/xen/drivers/acpi/apei/erst.c b/xen/drivers/acpi/apei/erst.c
> index 14acf5d773..7fc4de5de9 100644
> --- a/xen/drivers/acpi/apei/erst.c
> +++ b/xen/drivers/acpi/apei/erst.c
> @@ -799,7 +799,7 @@ int __init erst_init(void)
> printk(KERN_WARNING "Failed to get ERST table: %s\n", msg);
> return -EINVAL;
> }
> - map_pages_to_xen((unsigned long)__va(erst_addr), PFN_DOWN(erst_addr),
> + map_pages_to_xen((unsigned long)__va(erst_addr), maddr_to_mfn(erst_addr),
> PFN_UP(erst_addr + erst_len) - PFN_DOWN(erst_addr),
> PAGE_HYPERVISOR);
> erst_tab = __va(erst_addr);
> diff --git a/xen/drivers/acpi/apei/hest.c b/xen/drivers/acpi/apei/hest.c
> index f74e7c2a06..70734ab0e2 100644
> --- a/xen/drivers/acpi/apei/hest.c
> +++ b/xen/drivers/acpi/apei/hest.c
> @@ -184,7 +184,7 @@ void __init acpi_hest_init(void)
> acpi_format_exception(status));
> goto err;
> }
> - map_pages_to_xen((unsigned long)__va(hest_addr), PFN_DOWN(hest_addr),
> + map_pages_to_xen((unsigned long)__va(hest_addr), maddr_to_mfn(hest_addr),
> PFN_UP(hest_addr + hest_len) - PFN_DOWN(hest_addr),
> PAGE_HYPERVISOR);
> hest_tab = __va(hest_addr);
> diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
> index d713a8ca5d..46decd4eb1 100644
> --- a/xen/drivers/passthrough/vtd/dmar.c
> +++ b/xen/drivers/passthrough/vtd/dmar.c
> @@ -1008,7 +1008,7 @@ int __init acpi_dmar_init(void)
> if ( ACPI_SUCCESS(acpi_get_table_phys(ACPI_SIG_DMAR, 0,
> &dmar_addr, &dmar_len)) )
> {
> - map_pages_to_xen((unsigned long)__va(dmar_addr), PFN_DOWN(dmar_addr),
> + map_pages_to_xen((unsigned long)__va(dmar_addr), maddr_to_mfn(dmar_addr),
> PFN_UP(dmar_addr + dmar_len) - PFN_DOWN(dmar_addr),
> PAGE_HYPERVISOR);
> dmar_table = __va(dmar_addr);
> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index 09bec67f63..5a9ca6a55b 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -138,7 +138,7 @@ extern vaddr_t xenheap_virt_start;
> #endif
>
> #ifdef CONFIG_ARM_32
> -#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
> +#define is_xen_heap_page(page) is_xen_heap_mfn(__page_to_mfn(page))
> #define is_xen_heap_mfn(mfn) ({ \
> unsigned long mfn_ = (mfn); \
> (mfn_ >= mfn_x(xenheap_mfn_start) && \
> diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> index 538478fa24..5a7d25e33f 100644
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -165,7 +165,7 @@ bool scrub_free_pages(void);
> /* Map machine page range in Xen virtual address space. */
> int map_pages_to_xen(
> unsigned long virt,
> - unsigned long mfn,
> + mfn_t mfn,
> unsigned long nr_mfns,
> unsigned int flags);
> /* Alter the permissions of a range of Xen virtual address space. */
> --
> 2.11.0
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel