Re: [Xen-devel] [PATCH 8/8] xen: Switch parameter in get_page_from_gfn to use typesafe gfn
- To: Julien Grall <julien.grall@xxxxxxx>
- From: Andrii Anisov <andrii.anisov@xxxxxxxxx>
- Date: Mon, 12 Nov 2018 18:49:34 +0200
- Cc: kevin.tian@xxxxxxxxx, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Wei Liu <wei.liu2@xxxxxxxxxx>, jun.nakajima@xxxxxxxxx, Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>, George Dunlap <George.Dunlap@xxxxxxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Ian Jackson <ian.jackson@xxxxxxxxxxxxx>, "Tim \(Xen.org\)" <tim@xxxxxxx>, julie.grall@xxxxxxx, Jan Beulich <jbeulich@xxxxxxxx>, Paul Durrant <paul.durrant@xxxxxxxxxx>, suravee.suthikulpanit@xxxxxxx, xen-devel@xxxxxxxxxxxxxxxxxxxx, Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>, brian.woods@xxxxxxx
- Delivery-date: Mon, 12 Nov 2018 16:49:51 +0000
- List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
Hello Julien,
I'm just wondering whether this patch really belongs in the xentrace series; it looks more like a separate clean-up patch.
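For reference, the typesafe wrappers this change relies on boil down to something like the sketch below (simplified, roughly following TYPE_SAFE() in xen/include/xen/typesafe.h; the release build collapses the struct to a plain typedef):

    /* Simplified sketch of the typesafe gfn -- debug-build flavour only. */
    typedef struct { unsigned long gfn; } gfn_t;

    static inline gfn_t _gfn(unsigned long g) { return (gfn_t){ g }; }  /* wrap a raw frame number */
    static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }        /* unwrap where a raw value is needed */

    /*
     * With get_page_from_gfn() taking a gfn_t, accidentally passing an MFN,
     * a raw PFN or a full guest address no longer compiles, e.g.:
     *
     *     page = get_page_from_gfn(d, _gfn(raw_gfn), &p2mt, P2M_ALLOC);
     */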
No functional change intended.
Only reasonable clean-ups are done in this patch. The rest will use _gfn
for the time being.
Signed-off-by: Julien Grall <julie.grall@xxxxxxx>
---
xen/arch/arm/guestcopy.c | 2 +-
xen/arch/arm/mm.c | 2 +-
xen/arch/x86/cpu/vpmu.c | 2 +-
xen/arch/x86/domain.c | 12 ++++++------
xen/arch/x86/domctl.c | 6 +++---
xen/arch/x86/hvm/dm.c | 2 +-
xen/arch/x86/hvm/domain.c | 2 +-
xen/arch/x86/hvm/hvm.c | 9 +++++----
xen/arch/x86/hvm/svm/svm.c | 8 ++++----
xen/arch/x86/hvm/viridian/viridian.c | 24 ++++++++++++------------
xen/arch/x86/hvm/vmx/vmx.c | 4 ++--
xen/arch/x86/hvm/vmx/vvmx.c | 12 ++++++------
xen/arch/x86/mm.c | 24 ++++++++++++++----------
xen/arch/x86/mm/p2m.c | 2 +-
xen/arch/x86/mm/shadow/hvm.c | 6 +++---
xen/arch/x86/physdev.c | 3 ++-
xen/arch/x86/pv/descriptor-tables.c | 5 ++---
xen/arch/x86/pv/emul-priv-op.c | 6 +++---
xen/arch/x86/pv/mm.c | 2 +-
xen/arch/x86/traps.c | 11 ++++++-----
xen/common/domain.c | 2 +-
xen/common/event_fifo.c | 12 ++++++------
xen/common/memory.c | 4 ++--
xen/common/tmem_xen.c | 2 +-
xen/include/asm-arm/p2m.h | 6 +++---
xen/include/asm-x86/p2m.h | 11 +++++++----
26 files changed, 95 insertions(+), 86 deletions(-)
diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index 7a0f3e9d5f..55892062bb 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -37,7 +37,7 @@ static struct page_info *translate_get_page(copy_info_t info, uint64_t addr,
return get_page_from_gva(info.gva.v, addr,
write ? GV2M_WRITE : GV2M_READ);
- page = get_page_from_gfn(info.gpa.d, paddr_to_pfn(addr), &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(info.gpa.d, gaddr_to_gfn(addr), &p2mt, P2M_ALLOC);
if ( !page )
return NULL;
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 72d0285768..88711096ef 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1268,7 +1268,7 @@ int xenmem_add_to_physmap_one(
/* Take reference to the foreign domain page.
* Reference will be released in XENMEM_remove_from_physmap */
- page = get_page_from_gfn(od, idx, &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(od, _gfn(idx), &p2mt, P2M_ALLOC);
if ( !page )
{
put_pg_owner(od);
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 8a4f753eae..4d8f153031 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -607,7 +607,7 @@ static int pvpmu_init(struct domain *d, xen_pmu_params_t *params)
struct vcpu *v;
struct vpmu_struct *vpmu;
struct page_info *page;
- uint64_t gfn = params->val;
+ gfn_t gfn = _gfn(params->val);
if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
return -EINVAL;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f6fe954313..c5cce4b38d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -797,7 +797,7 @@ int arch_set_info_guest(
unsigned long flags;
bool compat;
#ifdef CONFIG_PV
- unsigned long cr3_gfn;
+ gfn_t cr3_gfn;
struct page_info *cr3_page;
unsigned long cr4;
int rc = 0;
@@ -1061,9 +1061,9 @@ int arch_set_info_guest(
set_bit(_VPF_in_reset, &v->pause_flags);
if ( !compat )
- cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[3]);
+ cr3_gfn = _gfn(xen_cr3_to_pfn(c.nat->ctrlreg[3]));
else
- cr3_gfn = compat_cr3_to_pfn(c.cmp->ctrlreg[3]);
+ cr3_gfn = _gfn(compat_cr3_to_pfn(c.cmp->ctrlreg[3]));
cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
if ( !cr3_page )
@@ -1092,7 +1092,7 @@ int arch_set_info_guest(
case 0:
if ( !compat && !VM_ASSIST(d, m2p_strict) &&
!paging_mode_refcounts(d) )
- fill_ro_mpt(_mfn(cr3_gfn));
+ fill_ro_mpt(_mfn(gfn_x(cr3_gfn)));
break;
default:
if ( cr3_page == current->arch.old_guest_table )
@@ -1107,7 +1107,7 @@ int arch_set_info_guest(
v->arch.guest_table = pagetable_from_page(cr3_page);
if ( c.nat->ctrlreg[1] )
{
- cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[1]);
+ cr3_gfn = _gfn(xen_cr3_to_pfn(c.nat->ctrlreg[1]));
cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
if ( !cr3_page )
@@ -1132,7 +1132,7 @@ int arch_set_info_guest(
break;
case 0:
if ( VM_ASSIST(d, m2p_strict) )
- zap_ro_mpt(_mfn(cr3_gfn));
+ zap_ro_mpt(_mfn(gfn_x(cr3_gfn)));
break;
}
}
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 33f9a869c0..6b0d8075cd 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -448,7 +448,7 @@ long arch_do_domctl(
break;
}
- page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(gfn), &t, P2M_ALLOC);
if ( unlikely(!page) ||
unlikely(is_xen_heap_page(page)) )
@@ -498,11 +498,11 @@ long arch_do_domctl(
case XEN_DOMCTL_hypercall_init:
{
- unsigned long gmfn = domctl->u.hypercall_init.gmfn;
+ gfn_t gfn = _gfn(domctl->u.hypercall_init.gmfn);
struct page_info *page;
void *hypercall_page;
- page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page || !get_page_type(page, PGT_writable_page) )
{
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d6d0e8be89..3b3ad27938 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -186,7 +186,7 @@ static int modified_memory(struct domain *d,
{
struct page_info *page;
- page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
+ page = get_page_from_gfn(d, _gfn(pfn), NULL, P2M_UNSHARE);
if ( page )
{
paging_mark_pfn_dirty(d, _pfn(pfn));
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 5d5a746a25..73d2da8441 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -297,7 +297,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
struct page_info *page = get_page_from_gfn(v->domain,
- v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
+ gaddr_to_gfn(v->arch.hvm.guest_cr[3]),
NULL, P2M_ALLOC);
if ( !page )
{
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7be9cf4454..be262e5a1d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2146,7 +2146,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
{
struct vcpu *v = current;
struct domain *d = v->domain;
- unsigned long gfn, old_value = v->arch.hvm.guest_cr[0];
+ unsigned long old_value = v->arch.hvm.guest_cr[0];
struct page_info *page;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
@@ -2201,7 +2201,8 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
if ( !paging_mode_hap(d) )
{
/* The guest CR3 must be pointing to the guest physical. */
- gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
+ gfn_t gfn = gaddr_to_gfn(v->arch.hvm.guest_cr[3]);
+
page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page )
{
@@ -2293,7 +2294,7 @@ int hvm_set_cr3(unsigned long value, bool may_defer)
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
- page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
+ page = get_page_from_gfn(v->domain, gaddr_to_gfn(value),
NULL, P2M_ALLOC);
if ( !page )
goto bad_cr3;
@@ -3120,7 +3121,7 @@ enum hvm_translation_result hvm_translate_get_page(
&& hvm_mmio_internal(gfn_to_gaddr(gfn)) )
return HVMTRANS_bad_gfn_to_mfn;
- page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
+ page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
if ( !page )
return HVMTRANS_bad_gfn_to_mfn;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 5d00256aaa..a7419bd444 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -317,7 +317,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
{
if ( c->cr0 & X86_CR0_PG )
{
- page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT,
+ page = get_page_from_gfn(v->domain, gaddr_to_gfn(c->cr3),
NULL, P2M_ALLOC);
if ( !page )
{
@@ -2412,9 +2412,9 @@ nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr)
return NULL;
/* Need to translate L1-GPA to MPA */
- page = get_page_from_gfn(v->domain,
- nv->nv_vvmcxaddr >> PAGE_SHIFT,
- &p2mt, P2M_ALLOC | P2M_UNSHARE);
+ page = get_page_from_gfn(v->domain,
+ gaddr_to_gfn(nv->nv_vvmcxaddr),
+ &p2mt, P2M_ALLOC | P2M_UNSHARE);
if ( !page )
return NULL;
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 2dc86dd0f3..1d3be156db 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -332,16 +332,16 @@ static void dump_reference_tsc(const struct domain *d)
static void enable_hypercall_page(struct domain *d)
{
- unsigned long gmfn = d->arch.hvm.viridian.hypercall_gpa.fields.pfn;
- struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ gfn_t gfn = _gfn(d->arch.hvm.viridian.hypercall_gpa.fields.pfn);
+ struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
uint8_t *p;
if ( !page || !get_page_type(page, PGT_writable_page) )
{
if ( page )
put_page(page);
- gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
+ gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+ gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
return;
}
@@ -367,8 +367,8 @@ static void enable_hypercall_page(struct domain *d)
static void initialize_vp_assist(struct vcpu *v)
{
struct domain *d = v->domain;
- unsigned long gmfn = v->arch.hvm.viridian.vp_assist.msr.fields.pfn;
- struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ gfn_t gfn = _gfn(v->arch.hvm.viridian.vp_assist.msr.fields.pfn);
+ struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
void *va;
ASSERT(!v->arch.hvm.viridian.vp_assist.va);
@@ -395,8 +395,8 @@ static void initialize_vp_assist(struct vcpu *v)
return;
fail:
- gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
+ gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+ gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
}
static void teardown_vp_assist(struct vcpu *v)
@@ -465,16 +465,16 @@ void viridian_apic_assist_clear(struct vcpu *v)
static void update_reference_tsc(struct domain *d, bool_t initialize)
{
- unsigned long gmfn = d->arch.hvm.viridian.reference_tsc.fields.pfn;
- struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ gfn_t gfn = _gfn(d->arch.hvm.viridian.reference_tsc.fields.pfn);
+ struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
HV_REFERENCE_TSC_PAGE *p;
if ( !page || !get_page_type(page, PGT_writable_page) )
{
if ( page )
put_page(page);
- gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
+ gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+ gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
return;
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e065f8bbdb..2070e78358 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -674,7 +674,7 @@ static int vmx_restore_cr0_cr3(
{
if ( cr0 & X86_CR0_PG )
{
- page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT,
+ page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3),
NULL, P2M_ALLOC);
if ( !page )
{
@@ -1373,7 +1373,7 @@ static void vmx_load_pdptrs(struct vcpu *v)
if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
goto crash;
- page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
+ page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3), &p2mt, P2M_UNSHARE);
if ( !page )
{
/* Ideally you don't want to crash but rather go into a wait
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index dfd08e2d0a..2953d05a17 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -649,11 +649,11 @@ static void nvmx_update_apic_access_address(struct vcpu *v)
if ( ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES )
{
p2m_type_t p2mt;
- unsigned long apic_gpfn;
+ gfn_t apic_gfn;
struct page_info *apic_pg;
- apic_gpfn = get_vvmcs(v, APIC_ACCESS_ADDR) >> PAGE_SHIFT;
- apic_pg = get_page_from_gfn(v->domain, apic_gpfn, &p2mt, P2M_ALLOC);
+ apic_gfn = gaddr_to_gfn(get_vvmcs(v, APIC_ACCESS_ADDR));
+ apic_pg = get_page_from_gfn(v->domain, apic_gfn, &p2mt, P2M_ALLOC);
ASSERT(apic_pg && !p2m_is_paging(p2mt));
__vmwrite(APIC_ACCESS_ADDR, page_to_maddr(apic_pg));
put_page(apic_pg);
@@ -670,11 +670,11 @@ static void nvmx_update_virtual_apic_address(struct vcpu *v)
if ( ctrl & CPU_BASED_TPR_SHADOW )
{
p2m_type_t p2mt;
- unsigned long vapic_gpfn;
+ gfn_t vapic_gfn;
struct page_info *vapic_pg;
- vapic_gpfn = get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR) >> PAGE_SHIFT;
- vapic_pg = get_page_from_gfn(v->domain, vapic_gpfn, &p2mt, P2M_ALLOC);
+ vapic_gfn = gaddr_to_gfn(get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR));
+ vapic_pg = get_page_from_gfn(v->domain, vapic_gfn, &p2mt, P2M_ALLOC);
ASSERT(vapic_pg && !p2m_is_paging(p2mt));
__vmwrite(VIRTUAL_APIC_PAGE_ADDR, page_to_maddr(vapic_pg));
put_page(vapic_pg);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 9363e9bd96..e3462f8a77 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2052,7 +2052,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
p2m_query_t q = l1e_get_flags(nl1e) & _PAGE_RW ?
P2M_ALLOC | P2M_UNSHARE : P2M_ALLOC;
- page = get_page_from_gfn(pg_dom, l1e_get_pfn(nl1e), &p2mt, q);
+ page = get_page_from_gfn(pg_dom, _gfn(l1e_get_pfn(nl1e)), &p2mt, q);
if ( p2m_is_paged(p2mt) )
{
@@ -3223,7 +3223,8 @@ long do_mmuext_op(
if ( paging_mode_refcounts(pg_owner) )
break;
- page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), NULL,
+ P2M_ALLOC);
if ( unlikely(!page) )
{
rc = -EINVAL;
@@ -3288,7 +3289,8 @@ long do_mmuext_op(
if ( paging_mode_refcounts(pg_owner) )
break;
- page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), NULL,
+ P2M_ALLOC);
if ( unlikely(!page) )
{
gdprintk(XENLOG_WARNING,
@@ -3504,7 +3506,8 @@ long do_mmuext_op(
}
case MMUEXT_CLEAR_PAGE:
- page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), &p2mt,
+ P2M_ALLOC);
if ( unlikely(p2mt != p2m_ram_rw) && page )
{
put_page(page);
@@ -3532,7 +3535,7 @@ long do_mmuext_op(
{
struct page_info *src_page, *dst_page;
- src_page = get_page_from_gfn(pg_owner, op.arg2.src_mfn, &p2mt,
+ src_page = get_page_from_gfn(pg_owner, _gfn(op.arg2.src_mfn), &p2mt,
P2M_ALLOC);
if ( unlikely(p2mt != p2m_ram_rw) && src_page )
{
@@ -3548,7 +3551,7 @@ long do_mmuext_op(
break;
}
- dst_page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt,
+ dst_page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), &p2mt,
P2M_ALLOC);
if ( unlikely(p2mt != p2m_ram_rw) && dst_page )
{
@@ -3636,7 +3639,8 @@ long do_mmu_update(
{
struct mmu_update req;
void *va = NULL;
- unsigned long gpfn, gmfn, mfn;
+ unsigned long gpfn, mfn;
+ gfn_t gfn;
struct page_info *page;
unsigned int cmd, i = 0, done = 0, pt_dom;
struct vcpu *curr = current, *v = curr;
@@ -3749,8 +3753,8 @@ long do_mmu_update(
rc = -EINVAL;
req.ptr -= cmd;
- gmfn = req.ptr >> PAGE_SHIFT;
- page = get_page_from_gfn(pt_owner, gmfn, &p2mt, P2M_ALLOC);
+ gfn = gaddr_to_gfn(req.ptr);
+ page = get_page_from_gfn(pt_owner, gfn, &p2mt, P2M_ALLOC);
if ( unlikely(!page) || p2mt != p2m_ram_rw )
{
@@ -3758,7 +3762,7 @@ long do_mmu_update(
put_page(page);
if ( p2m_is_paged(p2mt) )
{
- p2m_mem_paging_populate(pt_owner, gmfn);
+ p2m_mem_paging_populate(pt_owner, gfn_x(gfn));
rc = -ENOENT;
}
else
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index a00a3c1bff..3b2aac8804 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2718,7 +2718,7 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
* Take a refcnt on the mfn. NB: following supported for foreign mapping:
* ram_rw | ram_logdirty | ram_ro | paging_out.
*/
- page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(fdom, _gfn(fgfn), &p2mt, P2M_ALLOC);
if ( !page ||
!p2m_is_ram(p2mt) || p2m_is_shared(p2mt) || p2m_is_hole(p2mt) )
{
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index 4cc75916b8..9275ba476c 100644
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -313,15 +313,15 @@ const struct x86_emulate_ops hvm_shadow_emulator_ops = {
static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr,
struct sh_emulate_ctxt *sh_ctxt)
{
- unsigned long gfn;
+ gfn_t gfn;
struct page_info *page;
mfn_t mfn;
p2m_type_t p2mt;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
/* Translate the VA to a GFN. */
- gfn = paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec);
- if ( gfn == gfn_x(INVALID_GFN) )
+ gfn = _gfn(paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec));
+ if ( gfn_eq(gfn, INVALID_GFN) )
{
x86_emul_pagefault(pfec, vaddr, &sh_ctxt->ctxt);
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 3a3c15890b..4f3f438614 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -229,7 +229,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
ret = -EINVAL;
- page = get_page_from_gfn(current->domain, info.gmfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(current->domain, _gfn(info.gmfn),
+ NULL, P2M_ALLOC);
if ( !page )
break;
if ( !get_page_type(page, PGT_writable_page) )
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index 8b2d55fc2e..7e8f41d3fd 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -112,7 +112,7 @@ long pv_set_gdt(struct vcpu *v, unsigned long *frames, unsigned int entries)
{
struct page_info *page;
- page = get_page_from_gfn(d, frames[i], NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(frames[i]), NULL, P2M_ALLOC);
if ( !page )
goto fail;
if ( !get_page_type(page, PGT_seg_desc_page) )
@@ -209,7 +209,6 @@ int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list,
long do_update_descriptor(uint64_t pa, uint64_t desc)
{
struct domain *currd = current->domain;
- unsigned long gmfn = pa >> PAGE_SHIFT;
unsigned long mfn;
unsigned int offset;
struct desc_struct *gdt_pent, d;
@@ -220,7 +219,7 @@ long do_update_descriptor(uint64_t pa, uint64_t desc)
*(uint64_t *)&d = desc;
- page = get_page_from_gfn(currd, gmfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(currd, gaddr_to_gfn(pa), NULL, P2M_ALLOC);
if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
!page ||
!check_descriptor(currd, &d) )
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index f73ea4a163..a529ebcc3f 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -760,12 +760,12 @@ static int write_cr(unsigned int reg, unsigned long val,
case 3: /* Write CR3 */
{
struct domain *currd = curr->domain;
- unsigned long gfn;
+ gfn_t gfn;
struct page_info *page;
int rc;
- gfn = !is_pv_32bit_domain(currd)
- ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val);
+ gfn = _gfn(!is_pv_32bit_domain(currd)
+ ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val));
page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
if ( !page )
break;
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
index f5ea00ca4e..c9ad1152b4 100644
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -106,7 +106,7 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
return false;
- page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
+ page = get_page_from_gfn(currd, _gfn(l1e_get_pfn(gl1e)), NULL, P2M_ALLOC);
if ( unlikely(!page) )
return false;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 9471d89022..d967e49432 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -795,7 +795,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
case 0: /* Write hypercall page */
{
void *hypercall_page;
- unsigned long gmfn = val >> PAGE_SHIFT;
+ gfn_t gfn = gaddr_to_gfn(val);
unsigned int page_index = val & (PAGE_SIZE - 1);
struct page_info *page;
p2m_type_t t;
@@ -808,7 +808,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
return X86EMUL_EXCEPTION;
}
- page = get_page_from_gfn(d, gmfn, &t, P2M_ALLOC);
+ page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
if ( !page || !get_page_type(page, PGT_writable_page) )
{
@@ -817,13 +817,14 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(d, gmfn);
+ p2m_mem_paging_populate(d, gfn_x(gfn));
return X86EMUL_RETRY;
}
gdprintk(XENLOG_WARNING,
- "Bad GMFN %lx (MFN %#"PRI_mfn") to MSR %08x\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN), base);
+ "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn") to MSR %08x\n",
+ gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN),
+ base);
return X86EMUL_EXCEPTION;
}
diff --git a/xen/common/domain.c b/xen/common/domain.c
index d6650f0656..5e3c05b96c 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1250,7 +1250,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
if ( (v != current) && !(v->pause_flags & VPF_down) )
return -EINVAL;
- page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(gfn), NULL, P2M_ALLOC);
if ( !page )
return -EINVAL;
diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c
index c49f446754..71a6f673b2 100644
--- a/xen/common/event_fifo.c
+++ b/xen/common/event_fifo.c
@@ -358,7 +358,7 @@ static const struct evtchn_port_ops evtchn_port_ops_fifo =
.print_state = evtchn_fifo_print_state,
};
-static int map_guest_page(struct domain *d, uint64_t gfn, void **virt)
+static int map_guest_page(struct domain *d, gfn_t gfn, void **virt)
{
struct page_info *p;
@@ -419,7 +419,7 @@ static int setup_control_block(struct vcpu *v)
return 0;
}
-static int map_control_block(struct vcpu *v, uint64_t gfn, uint32_t offset)
+static int map_control_block(struct vcpu *v, gfn_t gfn, uint32_t offset)
{
void *virt;
unsigned int i;
@@ -505,7 +505,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
{
struct domain *d = current->domain;
uint32_t vcpu_id;
- uint64_t gfn;
+ gfn_t gfn;
uint32_t offset;
struct vcpu *v;
int rc;
@@ -513,7 +513,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
init_control->link_bits = EVTCHN_FIFO_LINK_BITS;
vcpu_id = init_control->vcpu;
- gfn = init_control->control_gfn;
+ gfn = _gfn(init_control->control_gfn);
offset = init_control->offset;
if ( vcpu_id >= d->max_vcpus || !d->vcpu[vcpu_id] )
@@ -569,7 +569,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
return rc;
}
-static int add_page_to_event_array(struct domain *d, unsigned long gfn)
+static int add_page_to_event_array(struct domain *d, gfn_t gfn)
{
void *virt;
unsigned int slot;
@@ -619,7 +619,7 @@ int evtchn_fifo_expand_array(const struct evtchn_expand_array *expand_array)
return -EOPNOTSUPP;
spin_lock(&d->event_lock);
- rc = add_page_to_event_array(d, expand_array->array_gfn);
+ rc = add_page_to_event_array(d, _gfn(expand_array->array_gfn));
spin_unlock(&d->event_lock);
return rc;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 987395fbb3..e02733dba0 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1365,7 +1365,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
return rc;
}
- page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(xrfp.gpfn), NULL, P2M_ALLOC);
if ( page )
{
rc = guest_physmap_remove_page(d, _gfn(xrfp.gpfn),
@@ -1636,7 +1636,7 @@ int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly,
p2m_type_t p2mt;
struct page_info *page;
- page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, q);
+ page = get_page_from_gfn(d, gfn, &p2mt, q);
#ifdef CONFIG_HAS_MEM_PAGING
if ( p2m_is_paging(p2mt) )
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index bf7b14f79a..72cba7f10c 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -52,7 +52,7 @@ static inline void *cli_get_page(xen_pfn_t cmfn, mfn_t *pcli_mfn,
p2m_type_t t;
struct page_info *page;
- page = get_page_from_gfn(current->domain, cmfn, &t, P2M_ALLOC);
+ page = get_page_from_gfn(current->domain, _gfn(cmfn), &t, P2M_ALLOC);
if ( !page || t != p2m_ram_rw )
{
if ( page )
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 7c67806056..5e598a0b37 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -278,7 +278,7 @@ struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn,
p2m_type_t *t);
static inline struct page_info *get_page_from_gfn(
- struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+ struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q)
{
mfn_t mfn;
p2m_type_t _t;
@@ -289,7 +289,7 @@ static inline struct page_info *get_page_from_gfn(
* not auto-translated.
*/
if ( unlikely(d != dom_xen) )
- return p2m_get_page_from_gfn(d, _gfn(gfn), t);
+ return p2m_get_page_from_gfn(d, gfn, t);
if ( !t )
t = &_t;
@@ -300,7 +300,7 @@ static inline struct page_info *get_page_from_gfn(
* DOMID_XEN see 1-1 RAM. The p2m_type is based on the type of the
* page.
*/
- mfn = _mfn(gfn);
+ mfn = _mfn(gfn_x(gfn));
page = mfn_to_page(mfn);
if ( !mfn_valid(mfn) || !get_page(page, d) )
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d08c595887..db1ec37610 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -489,18 +489,21 @@ struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
p2m_query_t q);
static inline struct page_info *get_page_from_gfn(
- struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+ struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q)
{
struct page_info *page;
+ mfn_t mfn;
if ( paging_mode_translate(d) )
- return p2m_get_page_from_gfn(p2m_get_hostp2m(d), _gfn(gfn), t, NULL, q);
+ return p2m_get_page_from_gfn(p2m_get_hostp2m(d), gfn, t, NULL, q);
/* Non-translated guests see 1-1 RAM / MMIO mappings everywhere */
if ( t )
*t = likely(d != dom_io) ? p2m_ram_rw : p2m_mmio_direct;
- page = mfn_to_page(_mfn(gfn));
- return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
+
+ mfn = _mfn(gfn_x(gfn));
+ page = mfn_to_page(mfn);
+ return mfn_valid(mfn) && get_page(page, d) ? page : NULL;
}
/* General conversion function from mfn to gfn */
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel