[Xen-changelog] [xen-unstable] [IA64] Fixes for 4k page support.
# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1188325831 21600
# Node ID 3cd445aecf592fa1a87fbf5cf6b0511805c50e92
# Parent  9341dd05561935d1ffb22372aba501f08a579e7d
[IA64] Fixes for 4k page support.

Some code is dependent on PAGE_SIZE and shouldn't be changed.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/arch/ia64/xen/faults.c |   29 ++++++++++++++++++++---------
 xen/arch/ia64/xen/vhpt.c   |    7 ++++++-
 2 files changed, 26 insertions(+), 10 deletions(-)

diff -r 9341dd055619 -r 3cd445aecf59 xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c        Tue Aug 28 12:27:39 2007 -0600
+++ b/xen/arch/ia64/xen/faults.c        Tue Aug 28 12:30:31 2007 -0600
@@ -729,6 +729,17 @@ ia64_shadow_fault(unsigned long ifa, uns
         unsigned long pte = 0;
         struct vhpt_lf_entry *vlfe;
 
+        /*
+         * v->arch.vhpt_pg_shift shouldn't be used here.
+         * Currently the dirty page logging bitmap is allocated based
+         * on PAGE_SIZE.  This is part of the xen_domctl_shadow_op ABI.
+         * If we want to log dirty pages at a finer granularity when
+         * v->arch.vhpt_pg_shift < PAGE_SHIFT, we have to
+         * revise the ABI and update this function and the related
+         * tool stack (live relocation).
+         */
+        unsigned long vhpt_pg_shift = PAGE_SHIFT;
+
         /* There are 2 jobs to do:
            - marking the page as dirty (the metaphysical address must be
              extracted to do that).
@@ -744,7 +755,7 @@ ia64_shadow_fault(unsigned long ifa, uns
                 if (vlfe->ti_tag == ia64_ttag(ifa)) {
                         /* The VHPT entry is valid.  */
                         gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >>
-                                                 v->arch.vhpt_pg_shift);
+                                                 vhpt_pg_shift);
                         BUG_ON(gpfn == INVALID_M2P_ENTRY);
                 } else {
                         unsigned long itir, iha;
@@ -760,10 +771,10 @@ ia64_shadow_fault(unsigned long ifa, uns
                         /* Try again!  */
                         if (fault != IA64_NO_FAULT) {
                                 /* This will trigger a dtlb miss.  */
-                                ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
-                                return;
-                        }
-                        gpfn = ((pte & _PAGE_PPN_MASK) >> v->arch.vhpt_pg_shift);
+                                ia64_ptcl(ifa, vhpt_pg_shift << 2);
+                                return;
+                        }
+                        gpfn = ((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift);
                         if (pte & _PAGE_D)
                                 pte |= _PAGE_VIRT_D;
                 }
@@ -791,7 +802,7 @@ ia64_shadow_fault(unsigned long ifa, uns
                 /* Purge the TC locally.
                    It will be reloaded from the VHPT iff the VHPT entry is
                    still valid.  */
-                ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
+                ia64_ptcl(ifa, vhpt_pg_shift << 2);
 
                 atomic64_inc(&d->arch.shadow_fault_count);
         } else {
@@ -803,6 +814,6 @@ ia64_shadow_fault(unsigned long ifa, uns
                 /* We don't know whether or not the fault must be
                    reflected.  The VHPT entry is not valid.  */
                 /* FIXME: in metaphysical mode, we could do an ITC now.  */
-                ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
-        }
-}
+                ia64_ptcl(ifa, vhpt_pg_shift << 2);
+        }
+}

diff -r 9341dd055619 -r 3cd445aecf59 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Tue Aug 28 12:27:39 2007 -0600
+++ b/xen/arch/ia64/xen/vhpt.c  Tue Aug 28 12:30:31 2007 -0600
@@ -384,7 +384,12 @@ __domain_flush_vtlb_track_entry(struct d
         int cpu;
         int vcpu;
         int local_purge = 1;
-        unsigned char ps = current->arch.vhpt_pg_shift;
+
+        /* TLB insert tracking is done in PAGE_SIZE units.  */
+        unsigned char ps = max_t(unsigned char,
+                                 current->arch.vhpt_pg_shift, PAGE_SHIFT);
+        /* This case isn't supported (yet).  */
+        BUG_ON(current->arch.vhpt_pg_shift > PAGE_SHIFT);
 
         BUG_ON((vaddr >> VRN_SHIFT) != VRN7);
         /*

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
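
Editorial note: the standalone C sketch below is illustration only, not part of the changeset. The helper names (mark_dirty(), gpa_to_logdirty_gpfn(), purge_page_shift()) are hypothetical, and it assumes a 16K hypervisor PAGE_SIZE (PAGE_SHIFT of 14) over 4K guest pages (shift 12). It shows the two constraints the patch relies on: the dirty-log bitmap exposed through the xen_domctl_shadow_op ABI is indexed per PAGE_SIZE frame, so gpfns must be derived with PAGE_SHIFT rather than the guest's smaller page shift, and the TLB purge size in __domain_flush_vtlb_track_entry() must be clamped up to at least PAGE_SHIFT because insert tracking works in PAGE_SIZE units.

/*
 * Illustrative sketch; this is not the Xen code above.  Names and
 * constants are assumptions: PAGE_SHIFT 14 (16K hypervisor pages),
 * GUEST_PG_SHIFT 12 (4K guest pages).
 */
#include <stdio.h>

#define GUEST_PG_SHIFT  12UL                    /* assumed 4K guest pages */
#define PAGE_SHIFT      14UL                    /* assumed 16K PAGE_SIZE  */
#define BITS_PER_LONG   (8 * sizeof(unsigned long))

/* Toy dirty bitmap: one bit per PAGE_SIZE frame, the unit the ABI hands
 * to the tool stack. */
static unsigned long dirty_bitmap[1024];

static void mark_dirty(unsigned long gpfn)
{
        dirty_bitmap[gpfn / BITS_PER_LONG] |= 1UL << (gpfn % BITS_PER_LONG);
}

/*
 * The bit index must be a PAGE_SIZE-granular gpfn.  Shifting by the
 * guest's smaller page shift would index beyond the bitmap the tool
 * stack allocated and mark the wrong frame.
 */
static unsigned long gpa_to_logdirty_gpfn(unsigned long gpa)
{
        return gpa >> PAGE_SHIFT;               /* not >> GUEST_PG_SHIFT */
}

/*
 * Mirrors the clamp added in __domain_flush_vtlb_track_entry(): purge
 * at least a PAGE_SIZE region, since insert tracking records entries
 * in PAGE_SIZE units.
 */
static unsigned long purge_page_shift(unsigned long vhpt_pg_shift)
{
        return vhpt_pg_shift > PAGE_SHIFT ? vhpt_pg_shift : PAGE_SHIFT;
}

int main(void)
{
        unsigned long gpa = 0x123456UL;         /* arbitrary guest-physical address */

        mark_dirty(gpa_to_logdirty_gpfn(gpa));
        printf("gpa 0x%lx -> logdirty bit %lu, purge shift %lu\n",
               gpa, gpa_to_logdirty_gpfn(gpa),
               purge_page_shift(GUEST_PG_SHIFT));
        return 0;
}

With 4K guest mappings under a 16K PAGE_SIZE, four guest pages share one logdirty bit; logging at the guest's finer granularity would require the ABI and tool stack revision mentioned in the comment added to ia64_shadow_fault().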