[Xen-changelog] [IA64] VTI: updated vtlb, support non-contiguous memory on VTI domain



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID cfe20f41f043a4c9d4166f117fd3f17adf706224
# Parent  e58ff5fd3550dc48bcaeb7b36bbb5579983e7e55
[IA64] VTI: updated vtlb, support non-contiguous memory on VTI domain

Previously a VTI domain required its memory to be machine-contiguous;
this patch makes VTI domains work on non-contiguous machine memory.

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
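
The bulk of the change is in vmx_alloc_contig_pages() (xen/arch/ia64/vmx/vmx_init.c
below): instead of backing all of guest memory with one high-order contiguous
allocation, the builder now allocates order-0 pages and wires them into the
guest physmap one frame at a time.  A minimal sketch of that pattern, using
the same Xen calls as the patch (alloc_domheap_pages, page_to_mfn,
assign_domain_page); the helper name map_guest_range is illustrative only:

    static int map_guest_range(struct domain *d, u64 start, u64 end)
    {
        struct page_info *page;
        u64 gpa;

        /* one single-page (order-0) allocation per guest frame, so the
         * backing machine frames may be scattered anywhere in memory */
        for (gpa = start; gpa < end; gpa += PAGE_SIZE) {
            page = alloc_domheap_pages(d, 0, 0);
            if (page == NULL)
                return -1;
            assign_domain_page(d, gpa, page_to_mfn(page) << PAGE_SHIFT);
        }
        return 0;
    }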

diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/vmx/mm.c
--- a/xen/arch/ia64/vmx/mm.c    Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/vmx/mm.c    Wed Mar  1 15:29:00 2006
@@ -117,6 +117,7 @@
         copy_from_user(&req, ureqs, sizeof(req));
         cmd = req.ptr&3;
         req.ptr &= ~3;
+/*
         if(cmd ==MMU_NORMAL_PT_UPDATE){
             entry.page_flags = req.val;
             entry.locked = 1;
@@ -135,8 +136,10 @@
                 panic("Tlb conflict!!");
                 return -1;
             }
-            thash_purge_and_insert(hcb, &entry);
-        }else if(cmd == MMU_MACHPHYS_UPDATE){
+            thash_purge_and_insert(hcb, &entry, req.ptr);
+        }else
+ */
+        if(cmd == MMU_MACHPHYS_UPDATE){
             mfn = req.ptr >>PAGE_SHIFT;
             gpfn = req.val;
             set_machinetophys(mfn,gpfn);
diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/vmx/vmmu.c  Wed Mar  1 15:29:00 2006
@@ -34,37 +34,23 @@
 #include <xen/irq.h>
 
 /*
- * Architecture ppn is in 4KB unit while XEN
- * page may be different(1<<PAGE_SHIFT).
- */
-static inline u64 arch_ppn_to_xen_ppn(u64 appn)
-{
-    return (appn << ARCH_PAGE_SHIFT) >> PAGE_SHIFT;
-}
-
-static inline u64 xen_ppn_to_arch_ppn(u64 xppn)
-{
-    return (xppn << PAGE_SHIFT) >> ARCH_PAGE_SHIFT;
-}
-
-
-/*
  * Get the machine page frame number in 16KB unit
  * Input:
  *  d: 
  */
-u64 get_mfn(domid_t domid, u64 gpfn, u64 pages)
-{
-    struct domain *d;
+u64 get_mfn(struct domain *d, u64 gpfn)
+{
+//    struct domain *d;
     u64    xen_gppn, xen_mppn, mpfn;
-    
+/*
     if ( domid == DOMID_SELF ) {
         d = current->domain;
     }
     else {
         d = find_domain_by_id(domid);
     }
-    xen_gppn = arch_ppn_to_xen_ppn(gpfn);
+ */
+    xen_gppn = arch_to_xen_ppn(gpfn);
     xen_mppn = gmfn_to_mfn(d, xen_gppn);
 /*
     for (i=0; i<pages; i++) {
@@ -73,8 +59,8 @@
         }
     }
 */
-    mpfn= xen_ppn_to_arch_ppn(xen_mppn);
-    mpfn = mpfn | (((1UL <<(PAGE_SHIFT-12))-1)&gpfn);
+    mpfn= xen_to_arch_ppn(xen_mppn);
+    mpfn = mpfn | (((1UL <<(PAGE_SHIFT-ARCH_PAGE_SHIFT))-1)&gpfn);
     return mpfn;
     
 }
@@ -142,66 +128,67 @@
 #endif
 }
 
-static thash_cb_t *init_domain_vhpt(struct vcpu *d)
-{
-    struct page_info *page;
-    void   *vbase,*vcur;
-    vhpt_special *vs;
+static thash_cb_t *init_domain_vhpt(struct vcpu *d, void *vbase, void *vcur)
+{
+//    struct page_info *page;
     thash_cb_t  *vhpt;
     PTA pta_value;
-    
-    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
+/*
+    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
     if ( page == NULL ) {
         panic("No enough contiguous memory for init_domain_mm\n");
     }
     vbase = page_to_virt(page);
     printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
-    memset(vbase, 0, VCPU_TLB_SIZE);
-    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
+    memset(vbase, 0, VCPU_VHPT_SIZE);
+ */
+//    vcur = (void*)((u64)vbase + VCPU_VHPT_SIZE);
     vcur -= sizeof (thash_cb_t);
     vhpt = vcur;
     vhpt->ht = THASH_VHPT;
     vhpt->vcpu = d;
-    vhpt->hash_func = machine_thash;
-    vcur -= sizeof (vhpt_special);
-    vs = vcur;
+//    vhpt->hash_func = machine_thash;
+//    vcur -= sizeof (vhpt_special);
+//    vs = vcur;
 
     /* Setup guest pta */
     pta_value.val = 0;
     pta_value.ve = 1;
     pta_value.vf = 1;
-    pta_value.size = VCPU_TLB_SHIFT - 1;    /* 2M */
+    pta_value.size = VCPU_VHPT_SHIFT - 1;    /* 16M*/
     pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
     d->arch.arch_vmx.mpta = pta_value.val;
-   
-    vhpt->vs = vs;
-    vhpt->vs->get_mfn = get_mfn;
-    vhpt->vs->tag_func = machine_ttag;
+
+//    vhpt->vs = vs;
+//    vhpt->vs->get_mfn = __gpfn_to_mfn_foreign;
+//    vhpt->vs->tag_func = machine_ttag;
     vhpt->hash = vbase;
-    vhpt->hash_sz = VCPU_TLB_SIZE/2;
+    vhpt->hash_sz = VCPU_VHPT_SIZE/2;
     vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz);
     vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
-    vhpt->recycle_notifier = recycle_message;
-    thash_init(vhpt,VCPU_TLB_SHIFT-1);
+//    vhpt->recycle_notifier = recycle_message;
+    thash_init(vhpt,VCPU_VHPT_SHIFT-1);
     return vhpt;
 }
 
 
+
 thash_cb_t *init_domain_tlb(struct vcpu *d)
 {
     struct page_info *page;
-    void    *vbase,*vcur;
+    void    *vbase, *vhptbase, *vcur;
     tlb_special_t  *ts;
     thash_cb_t  *tlb;
     
-    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
+    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
     if ( page == NULL ) {
         panic("No enough contiguous memory for init_domain_mm\n");
     }
-    vbase = page_to_virt(page);
-    printk("Allocate domain tlb at 0x%lx\n", (u64)vbase);
-    memset(vbase, 0, VCPU_TLB_SIZE);
-    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
+    vhptbase = page_to_virt(page);
+    memset(vhptbase, 0, VCPU_VHPT_SIZE);
+    printk("Allocate domain tlb&vhpt at 0x%lx\n", (u64)vhptbase);
+    vbase =vhptbase + VCPU_VHPT_SIZE - VCPU_VTLB_SIZE;
+    vcur = (void*)((u64)vbase + VCPU_VTLB_SIZE);
     vcur -= sizeof (thash_cb_t);
     tlb = vcur;
     tlb->ht = THASH_TLB;
@@ -209,14 +196,14 @@
     vcur -= sizeof (tlb_special_t);
     ts = vcur;
     tlb->ts = ts;
-    tlb->ts->vhpt = init_domain_vhpt(d);
-    tlb->hash_func = machine_thash;
+    tlb->ts->vhpt = init_domain_vhpt(d,vhptbase,vbase);
+//    tlb->hash_func = machine_thash;
     tlb->hash = vbase;
-    tlb->hash_sz = VCPU_TLB_SIZE/2;
-    tlb->cch_buf = (void *)((u64)vbase + tlb->hash_sz);
+    tlb->hash_sz = VCPU_VTLB_SIZE/2;
+    tlb->cch_buf = (void *)(vbase + tlb->hash_sz);
     tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
-    tlb->recycle_notifier = recycle_message;
-    thash_init(tlb,VCPU_TLB_SHIFT-1);
+//    tlb->recycle_notifier = recycle_message;
+    thash_init(tlb,VCPU_VTLB_SHIFT-1);
     return tlb;
 }
 
@@ -250,12 +237,12 @@
     u64     psr;
     thash_data_t    mtlb;
     unsigned int    cl = tlb->cl;
-    unsigned long mtlb_ppn; 
+    unsigned long mtlb_ppn;
     mtlb.ifa = tlb->vadr;
     mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
     //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
     mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
-    mtlb.ppn = (unsigned long)get_mfn(DOMID_SELF,tlb->ppn, 1);
+    mtlb.ppn = get_mfn(d->domain,tlb->ppn);
     mtlb_ppn=mtlb.ppn;
     if (mtlb_ppn == INVALID_MFN)
     panic("Machine tlb insert with invalid mfn number.\n");
@@ -289,42 +276,33 @@
 //    ia64_srlz_i();
 //    return;
 }
-
-u64 machine_thash(PTA pta, u64 va)
-{
-    u64     saved_pta;
-    u64     hash_addr;
-    unsigned long psr;
-
-    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
-    psr = ia64_clear_ic();
-    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
-    hash_addr = ia64_thash(va);
-    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
-    ia64_set_psr(psr);
-    ia64_srlz_i();
-    return hash_addr;
-}
-
-u64 machine_ttag(PTA pta, u64 va)
-{
-//    u64     saved_pta;
-//    u64     hash_addr, tag;
-//    u64     psr;
-//    struct vcpu *v = current;
-
-//    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
-//    psr = ia64_clear_ic();
-//    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
-//    tag = ia64_ttag(va);
+/*
+u64 machine_thash(u64 va)
+{
+    return ia64_thash(va);
+}
+
+u64 machine_ttag(u64 va)
+{
     return ia64_ttag(va);
-//    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
-//    ia64_set_psr(psr);
-//    ia64_srlz_i();
-//    return tag;
-}
-
-
+}
+*/
+thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
+{
+    u64 index,pfn,rid,pfn_bits;
+    pfn_bits = vpta.size-5-8;
+    pfn = REGION_OFFSET(va)>>_REGION_PAGE_SIZE(vrr);
+    rid = _REGION_ID(vrr);
+    index = ((rid&0xff)<<pfn_bits)|(pfn&((1UL<<pfn_bits)-1));
+    *tag = ((rid>>8)&0xffff) | ((pfn >>pfn_bits)<<16);
+    return (thash_data_t *)((vpta.base<<PTA_BASE_SHIFT)+(index<<5));
+//    return ia64_call_vsa(PAL_VPS_THASH,va,vrr,vpta,0,0,0,0);
+}
+
+//u64 vsa_ttag(u64 va, u64 vrr)
+//{
+//    return ia64_call_vsa(PAL_VPS_TTAG,va,vrr,0,0,0,0,0);
+//}
 
 int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
 {
@@ -371,11 +349,12 @@
  *  num:  number of dword (8byts) to read.
  */
 int
-fetch_code(VCPU *vcpu, u64 gip, u64 *code)
-{
-    u64     gpip;   // guest physical IP
-    u64     mpa;
+fetch_code(VCPU *vcpu, u64 gip, u64 *code1, u64 *code2)
+{
+    u64     gpip=0;   // guest physical IP
+    u64     *vpa;
     thash_data_t    *tlb;
+    thash_cb_t *hcb;
     ia64_rr vrr;
     u64     mfn;
 
@@ -384,19 +363,26 @@
     }
     else {
         vmx_vcpu_get_rr(vcpu, gip, &vrr.rrval);
-        tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu),
-                vrr.rid, gip, ISIDE_TLB );
+       hcb = vmx_vcpu_get_vtlb(vcpu);
+        tlb = vtlb_lookup_ex (hcb, vrr.rid, gip, ISIDE_TLB );
         if( tlb == NULL )
-             tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu),
+             tlb = vtlb_lookup_ex (hcb,
                 vrr.rid, gip, DSIDE_TLB );
-        if ( tlb == NULL ) panic("No entry found in ITLB and DTLB\n");
-        gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
-    }
-    mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
-    if ( mfn == INVALID_MFN ) return 0;
- 
-    mpa = (gpip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT);
-    *code = *(u64*)__va(mpa);
+        if (tlb) 
+               gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
+    }
+    if( gpip){
+        mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
+       if( mfn == INVALID_MFN )  panic("fetch_code: invalid memory\n");
+       vpa =(u64 *)__va( (gip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT));
+    }else{
+       tlb = vhpt_lookup(gip);
+       if( tlb == NULL)
+           panic("No entry found in ITLB and DTLB\n");
+       vpa =(u64 *)__va((tlb->ppn>>(PAGE_SHIFT-ARCH_PAGE_SHIFT)<<PAGE_SHIFT)|(gip&(PAGE_SIZE-1)));
+    }
+    *code1 = *vpa++;
+    *code2 = *vpa;
     return 1;
 }
 
@@ -420,13 +406,13 @@
     sections.tr = 1;
     sections.tc = 0;
 
-    ovl = thash_find_overlap(hcb, &data, sections);
+    ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB);
     while (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    thash_purge_and_insert(hcb, &data);
+    thash_purge_and_insert(hcb, &data, ifa);
     return IA64_NO_FAULT;
 }
 
@@ -447,24 +433,26 @@
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 1;
     data.cl=DSIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
+    vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
 
-    ovl = thash_find_overlap(hcb, &data, sections);
+    ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB);
     if (ovl) {
           // generate MCA.
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    thash_purge_and_insert(hcb, &data);
+    thash_purge_and_insert(hcb, &data, ifa);
     return IA64_NO_FAULT;
 }
 
 /*
  * Return TRUE/FALSE for success of lock operation
  */
+
+/*
 int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
 {
 
@@ -472,12 +460,15 @@
     ia64_rr vrr;
     u64          preferred_size;
 
-    vmx_vcpu_get_rr(vcpu, va, (UINT64 *)&vrr);
+    vmx_vcpu_get_rr(vcpu, va, &vrr);
     hcb = vmx_vcpu_get_vtlb(vcpu);
     va = PAGEALIGN(va,vrr.ps);
     preferred_size = PSIZE(vrr.ps);
     return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
 }
+ */
+
+
 
IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
 {
@@ -486,6 +477,7 @@
     thash_cb_t  *hcb;
     search_section_t sections;
     ia64_rr vrr;
+    u64 mfn,psr;
 
     hcb = vmx_vcpu_get_vtlb(vcpu);
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
@@ -498,7 +490,8 @@
     sections.tr = 1;
     sections.tc = 0;
 
-    ovl = thash_find_overlap(hcb, &data, sections);
+
+    ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB);
     if (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
@@ -507,7 +500,23 @@
     sections.tr = 0;
     sections.tc = 1;
     thash_purge_entries(hcb, &data, sections);
+/*    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
+        data.contiguous=1;
+    }
+ */
     thash_tr_insert(hcb, &data, ifa, idx);
+/*
+    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
+        mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn));
+        data.page_flags=pte&~PAGE_FLAGS_RV_MASK;
+        data.ppn = xen_to_arch_ppn(mfn);
+        psr = ia64_clear_ic();
+        ia64_itr(0x1, IA64_ITR_GUEST_KERNEL, data.vadr, data.page_flags, data.ps);
+        ia64_set_psr(psr);      // restore psr
+        ia64_srlz_i();
+//        return IA64_NO_FAULT;
+    }
+*/
     return IA64_NO_FAULT;
 }
 
@@ -518,7 +527,7 @@
     thash_cb_t  *hcb;
     search_section_t sections;
     ia64_rr    vrr;
-
+    u64 mfn,psr;
 
     hcb = vmx_vcpu_get_vtlb(vcpu);
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
@@ -526,12 +535,12 @@
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 0;
     data.cl=DSIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
+    vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
 
-    ovl = thash_find_overlap(hcb, &data, sections);
+    ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB);
     while (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
@@ -540,7 +549,25 @@
     sections.tr = 0;
     sections.tc = 1;
     thash_purge_entries(hcb, &data, sections);
+/*
+    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
+        data.contiguous=1;
+    }
+ */
     thash_tr_insert(hcb, &data, ifa, idx);
+/*
+    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
+        mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn));
+        data.page_flags=pte&~PAGE_FLAGS_RV_MASK;
+        data.ppn = xen_to_arch_ppn(mfn);
+        psr = ia64_clear_ic();
+        ia64_itr(0x2,IA64_DTR_GUEST_KERNEL , data.vadr, data.page_flags, data.ps);
+        ia64_set_psr(psr);      // restore psr
+        ia64_srlz_i();
+//        return IA64_NO_FAULT;
+    }
+*/
+
     return IA64_NO_FAULT;
 }
 
@@ -685,7 +712,25 @@
             *padr = (data->ppn<<12) | (vadr&(PSIZE(data->ps)-1));
             return IA64_NO_FAULT;
         }
-    }else{
+    }
+    data = vhpt_lookup(vadr);
+    if(data){
+        if(data->p==0){
+            visr.na=1;
+            vcpu_set_isr(vcpu,visr.val);
+            page_not_present(vcpu, vadr);
+            return IA64_FAULT;
+        }else if(data->ma == VA_MATTR_NATPAGE){
+            visr.na = 1;
+            vcpu_set_isr(vcpu, visr.val);
+            dnat_page_consumption(vcpu, vadr);
+            return IA64_FAULT;
+        }else{
+            *padr = ((*(mpt_table+arch_to_xen_ppn(data->ppn)))<<PAGE_SHIFT) | (vadr&(PAGE_SIZE-1));
+            return IA64_NO_FAULT;
+        }
+    }
+    else{
         if(!vhpt_enabled(vcpu, vadr, NA_REF)){
             if(vpsr.ic){
                 vcpu_set_isr(vcpu, visr.val);
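
A note on the new vsa_thash() above: it computes the long-format VHPT hash
in software, replacing the removed machine_thash(), which obtained the same
result by temporarily loading the guest's PTA into cr.pta and executing
thash.  A commented sketch of the computation, assuming 32-byte hash entries
(which is what the <<5, and the -5 in pfn_bits, imply):

    /* vpta.size is log2 of the table size, so the table holds
     * 1 << (vpta.size - 5) entries of 32 bytes each */
    pfn_bits = vpta.size - 5 - 8;    /* index bits left beside 8 rid bits */
    pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr); /* vpn in region   */
    rid = _REGION_ID(vrr);
    /* the low 8 rid bits fold into the bucket index ...                  */
    index = ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
    /* ... the remaining rid bits and the high vpn bits go into the tag   */
    *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
    return (thash_data_t *)((vpta.base << PTA_BASE_SHIFT) + (index << 5));
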
diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S     Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/vmx/vmx_entry.S     Wed Mar  1 15:29:00 2006
@@ -34,6 +34,7 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 #include <asm/vhpt.h>
+#include <asm/vmmu.h>
 #include "vmx_minstate.h"
 
 /*
@@ -696,7 +697,7 @@
    movl r25=PAGE_KERNEL
    ;;
    or loc5 = r25,loc5          // construct PA | page properties
-   mov r23 = IA64_GRANULE_SHIFT <<2
+   mov r23 = VCPU_VHPT_SHIFT <<2
    ;;
    ptr.d   in3,r23
    ;;
diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Wed Mar  1 15:29:00 2006
@@ -178,6 +178,8 @@
  * Lock guest page in vTLB, so that it's not relinquished by recycle
  * session when HV is servicing that hypercall.
  */
+
+/*
 void hyper_lock_page(void)
 {
 //TODO:
@@ -190,6 +192,7 @@
 
     vmx_vcpu_increment_iip(vcpu);
 }
+ */
 
 static int do_set_shared_page(VCPU *vcpu, u64 gpa)
 {
diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/vmx/vmx_init.c      Wed Mar  1 15:29:00 2006
@@ -172,7 +172,15 @@
        cpuid3.number = 4;      /* 5 - 1 */
        vpd->vcpuid[3] = cpuid3.value;
 
+    vpd->vac.a_from_int_cr = 1;
+    vpd->vac.a_to_int_cr = 1;
+    vpd->vac.a_from_psr = 1;
+    vpd->vac.a_from_cpuid = 1;
+    vpd->vac.a_cover = 1;
+    vpd->vac.a_bsw = 1;
+
        vpd->vdc.d_vmsw = 1;
+
        return vpd;
 }
 
@@ -300,7 +308,7 @@
 int vmx_alloc_contig_pages(struct domain *d)
 {
        unsigned int order;
-       unsigned long i, j, start, end, pgnr, conf_nr;
+       unsigned long i, j, start,tmp, end, pgnr, conf_nr;
        struct page_info *page;
        struct vcpu *v = d->vcpu[0];
 
@@ -315,52 +323,100 @@
        }
 
        conf_nr = VMX_CONFIG_PAGES(d);
+    if((conf_nr<<PAGE_SHIFT)<(1UL<<(_PAGE_SIZE_64M+1)))
+        panic("vti domain needs 128M memory at least\n");
+/*
        order = get_order_from_pages(conf_nr);
        if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
            printk("Could not allocate order=%d pages for vmx contig alloc\n",
                        order);
            return -1;
        }
+*/
+ 
+/* reserve contiguous 64M for linux kernel */
+
+    if (unlikely((page = alloc_domheap_pages(d,(KERNEL_TR_PAGE_SHIFT-PAGE_SHIFT), 0)) == NULL)) {
+        printk("No enough memory for vti domain!!!\n");
+        return -1;
+    }
+    pgnr = page_to_mfn(page);
+       for (i=(1UL<<KERNEL_TR_PAGE_SHIFT);i<(1UL<<(KERNEL_TR_PAGE_SHIFT+1));i+=PAGE_SIZE,pgnr++){
+           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+    }
+
+       for (i = 0; i < (1UL<<KERNEL_TR_PAGE_SHIFT) ; i += PAGE_SIZE){
+        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+            printk("No enough memory for vti domain!!!\n");
+            return -1;
+        }
+           pgnr = page_to_mfn(page);
+           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+    }
 
        /* Map normal memory below 3G */
-       pgnr = page_to_mfn(page);
        end = conf_nr << PAGE_SHIFT;
-       for (i = 0;
-            i < (end < MMIO_START ? end : MMIO_START);
-            i += PAGE_SIZE, pgnr++)
+    tmp = end < MMIO_START ? end : MMIO_START;
+       for (i = (1UL<<(KERNEL_TR_PAGE_SHIFT+1)); i < tmp; i += PAGE_SIZE){
+        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+            printk("No enough memory for vti domain!!!\n");
+            return -1;
+        }
+           pgnr = page_to_mfn(page);
            assign_domain_page(d, i, pgnr << PAGE_SHIFT);
-
+    }
        /* Map normal memory beyond 4G */
        if (unlikely(end > MMIO_START)) {
            start = 4 * MEM_G;
            end = start + (end - 3 * MEM_G);
-           for (i = start; i < end; i += PAGE_SIZE, pgnr++)
-               assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+           for (i = start; i < end; i += PAGE_SIZE){
+            if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+                printk("No enough memory for vti domain!!!\n");
+                return -1;
+            }
+            pgnr = page_to_mfn(page);
+            assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+        }
        }
 
        d->arch.max_pfn = end >> PAGE_SHIFT;
-
+/*
        order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
        if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
            printk("Could not allocate order=%d pages for vmx contig alloc\n",
-                       order);
+                       order);
            return -1;
        }
-
+*/
        /* Map guest firmware */
-       pgnr = page_to_mfn(page);
-       for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
+       for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++){
+        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+            printk("No enough memory for vti domain!!!\n");
+            return -1;
+        }
+           pgnr = page_to_mfn(page);
            assign_domain_page(d, i, pgnr << PAGE_SHIFT);
-
+    }
+
+/*
        if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
            printk("Could not allocate order=1 pages for vmx contig alloc\n");
            return -1;
        }
-
+*/
        /* Map for shared I/O page and xenstore */
+    if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+        printk("No enough memory for vti domain!!!\n");
+        return -1;
+    }
        pgnr = page_to_mfn(page);
        assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
-       pgnr++;
+
+    if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+        printk("No enough memory for vti domain!!!\n");
+        return -1;
+    }
+       pgnr = page_to_mfn(page);
        assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
 
        set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
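
The resulting guest-physical layout, assuming KERNEL_TR_PAGE_SHIFT == 26
(a 64M page, its usual value, matching the _PAGE_SIZE_64M used in the size
check above):

    /* guest physical space after vmx_alloc_contig_pages():
     *
     *   [0,    64M)  order-0 pages, machine-scattered
     *   [64M, 128M)  one contiguous 64M machine block: the guest Linux
     *                kernel pins itself with a single 64M translation
     *                register, and a TR must map machine-contiguous
     *                memory, so only this range stays contiguous
     *   [128M, end)  order-0 pages, machine-scattered
     *   GFW / IO / xenstore pages: order-0 allocations as well
     */

Hence the new "vti domain needs 128M memory at least" check: the domain has
to reach at least the end of the contiguous kernel block at 128M.
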
diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S       Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/vmx/vmx_ivt.S       Wed Mar  1 15:29:00 2006
@@ -269,6 +269,10 @@
 (p7)br.sptk vmx_fault_3
 vmx_alt_itlb_miss_1:
        mov r16=cr.ifa          // get address that caused the TLB miss
+    ;;
+    tbit.z p6,p7=r16,63
+(p6)br.sptk vmx_fault_3
+    ;;
        movl r17=PAGE_KERNEL
        mov r24=cr.ipsr
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
@@ -300,6 +304,10 @@
 (p7)br.sptk vmx_fault_4
 vmx_alt_dtlb_miss_1:
        mov r16=cr.ifa          // get address that caused the TLB miss
+    ;;
+    tbit.z p6,p7=r16,63
+(p6)br.sptk vmx_fault_4
+    ;;
        movl r17=PAGE_KERNEL
        mov r20=cr.isr
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
@@ -397,7 +405,7 @@
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
 ENTRY(vmx_interrupt)
-    VMX_DBG_FAULT(12)
+//    VMX_DBG_FAULT(12)
        mov r31=pr              // prepare to save predicates
     mov r19=12
     mov r29=cr.ipsr
@@ -734,7 +742,7 @@
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
 ENTRY(vmx_virtualization_fault)
-    VMX_DBG_FAULT(37)
+//    VMX_DBG_FAULT(37)
        mov r31=pr
     mov r19=37
     adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
@@ -1138,5 +1146,5 @@
     data8 hyper_not_support     //hyper_boot_vcpu
     data8 hyper_not_support     //hyper_ni_hypercall       /* 25 */
     data8 hyper_not_support     //hyper_mmuext_op
-    data8 hyper_lock_page
+    data8 hyper_not_support     //tata8 hyper_lock_page
     data8 hyper_set_shared_page
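
The tbit.z additions route alternate TLB misses on addresses with bit 63
clear to the generic fault handlers, so only the upper half of the address
space still takes the fast PAGE_KERNEL identity mapping.  In C, the added
test amounts to the sketch below (vmx_fault_3/vmx_fault_4 stand in for the
branch targets):

    /* C rendering of "tbit.z p6,p7=r16,63 ; (p6) br.sptk vmx_fault_3":
     * r16 holds cr.ifa, the faulting address */
    if (!(vadr >> 63))      /* bit 63 clear: no fast identity mapping */
        goto vmx_fault_3;   /* fall back to the fully virtualized path */
    /* otherwise build the PAGE_KERNEL identity pte and insert it */
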
diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Wed Mar  1 15:29:00 2006
@@ -27,7 +27,7 @@
 #include <asm/vmx_phy_mode.h>
 #include <xen/sched.h>
 #include <asm/pgtable.h>
-
+#include <asm/vmmu.h>
 int valid_mm_mode[8] = {
     GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
     INV_MODE,
@@ -94,7 +94,7 @@
      *  (1,1,1)->(1,0,0)
      */
 
-    {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF}
+    {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
 };
 
 void
@@ -104,9 +104,8 @@
     vcpu->arch.mode_flags = GUEST_IN_PHY;
 }
 
-extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
+extern u64 get_mfn(struct domain *d, u64 gpfn);
 extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
-
 void
 physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
 {
@@ -115,7 +114,7 @@
     u64 mppn,gppn;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     gppn=(vadr<<1)>>13;
-    mppn = get_mfn(DOMID_SELF,gppn,1);
+    mppn = get_mfn(vcpu->domain,gppn);
     mppn=(mppn<<12)|(vpsr.cpl<<7); 
 //    if(vadr>>63)
 //       mppn |= PHY_PAGE_UC;
@@ -147,7 +146,7 @@
 //        panic("dom n physical dtlb miss happen\n");
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     gppn=(vadr<<1)>>13;
-    mppn = get_mfn(DOMID_SELF,gppn,1);
+    mppn = get_mfn(vcpu->domain, gppn);
     mppn=(mppn<<12)|(vpsr.cpl<<7);
     if(vadr>>63)
         mppn |= PHY_PAGE_UC;
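
get_mfn() now takes the domain pointer directly.  The old domid-based form
resolved DOMID_SELF or called find_domain_by_id() on every physical-mode
TLB miss; the callers here already hold vcpu->domain, so the lookup (and
the unused page-count argument) can go:

    /* before: mppn = get_mfn(DOMID_SELF, gppn, 1);  (resolved the domid)
     * after:  pass the domain the caller already has */
    mppn = get_mfn(vcpu->domain, gppn);
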
diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/vmx/vmx_process.c   Wed Mar  1 15:29:00 2006
@@ -47,6 +47,7 @@
 #include <asm/vmx_vcpu.h>
 #include <asm/kregs.h>
 #include <asm/vmx.h>
+#include <asm/vmmu.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/vmx_phy_mode.h>
 #include <xen/mm.h>
@@ -314,6 +315,10 @@
         return;
     }
 */
+    if(vadr == 0x1ea18c00 ){
+        ia64_clear_ic();
+        while(1);
+    }
     if(is_physical_mode(v)&&(!(vadr<<1>>62))){
         if(vec==1){
             physical_itlb_miss(v, vadr);
@@ -342,12 +347,18 @@
             return IA64_FAULT;
         }
 
-       if ( data->ps != vrr.ps ) {
+//     if ( data->ps != vrr.ps ) {
+//             machine_tlb_insert(v, data);
+//     }
+//     else {
+/*        if ( data->contiguous&&(!data->tc)){
                machine_tlb_insert(v, data);
-       }
-       else {
-               thash_insert(vtlb->ts->vhpt,data,vadr);
-           }
+        }
+        else{
+ */
+            thash_vhpt_insert(vtlb->ts->vhpt,data,vadr);
+//        }
+//         }
     }else if(type == DSIDE_TLB){
         if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
             if(vpsr.ic){
@@ -367,8 +378,7 @@
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
             vrr=vmx_vcpu_rr(v,vhpt_adr);
-            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
-            if(data){
+            if(vhpt_lookup(vhpt_adr) ||  vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){
                 if(vpsr.ic){
                     vcpu_set_isr(v, misr.val);
                     dtlb_fault(v, vadr);
@@ -411,8 +421,7 @@
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
             vrr=vmx_vcpu_rr(v,vhpt_adr);
-            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
-            if(data){
+            if(vhpt_lookup(vhpt_adr) || vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){
                 if(!vpsr.ic){
                     misr.ni=1;
                 }
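
After this patch, page-size TCs are inserted only into the machine VHPT
(thash_vhpt_insert), while larger-than-page entries are kept in the
software vtlb as well, so deciding whether the address of the guest's VHPT
entry is itself mapped requires consulting both structures.  A condensed
view of the new test above:

    /* the translation for vhpt_adr may sit in the machine VHPT
     * (page-size TCs) or in the software vtlb (bigger pages) */
    if (vhpt_lookup(vhpt_adr) ||
        vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)) {
        /* mapped: deliver a plain dtlb fault to the guest */
    } else {
        /* not mapped: deliver a VHPT-miss fault instead */
    }
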
diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Wed Mar  1 15:29:00 2006
@@ -1300,9 +1300,7 @@
 IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
 {
        IA64_BUNDLE bundle;
-
-       fetch_code( current,iip, &bundle.i64[0]);
-       fetch_code( current,iip+8, &bundle.i64[1]);
+       fetch_code( current, iip, &bundle.i64[0], &bundle.i64[1]);
        return bundle;
 }
 
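fetch_code() now returns both halves of the instruction bundle from a
single lookup.  An IA-64 bundle is 16 bytes and 16-byte aligned, so the two
8-byte halves can never straddle a page boundary and one translation of iip
suffices; the old code translated iip and iip+8 independently:

    IA64_BUNDLE bundle;
    /* one translation, both 8-byte halves of the aligned 16-byte bundle */
    fetch_code(current, iip, &bundle.i64[0], &bundle.i64[1]);
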
diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/vmx/vtlb.c  Wed Mar  1 15:29:00 2006
@@ -28,8 +28,10 @@
 #include <asm/gcc_intrin.h>
 #include <linux/interrupt.h>
 #include <asm/vmx_vcpu.h>
+#include <asm/vmmu.h>
 #define  MAX_CCH_LENGTH     40
 
+thash_data_t *__alloc_chain(thash_cb_t *, thash_data_t *);
 
 static void cch_mem_init(thash_cb_t *hcb)
 {
@@ -50,8 +52,10 @@
 
     if ( (p = hcb->cch_freelist) != NULL ) {
         hcb->cch_freelist = p->next;
-    }
-    return &(p->data);
+        return p;
+    }else{
+        return NULL;
+    }
 }
 
 static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
@@ -65,36 +69,38 @@
 /*
  * Check to see if the address rid:va is translated by the TLB
  */
-static int __is_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
-{
-    u64  size1,sa1,ea1;
-    if ( tlb->rid != rid ||(!tlb->tc && tlb->cl != cl) )
-        return 0;
-    size1 = PSIZE(tlb->ps);
-    sa1 = tlb->vadr & ~(size1-1);   // mask the low address bits
-    ea1 = sa1 + size1;
-
-    if ( va >= sa1 && (va < ea1 || ea1 == 0) )
+
+static int __is_tr_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
+{
+    u64  size;
+    size = PSIZE(tlb->ps);
+    if(tlb->vadr&(size-1))
+        while(1);
+    if ((tlb->rid == rid) && ((va-tlb->vadr)<size))
         return 1;
     else
         return 0;
 }
 
 /*
- * Only for TLB format.
+ * Only for GUEST TR format.
  */
 static int
-__is_tlb_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
-{
-    uint64_t size1,sa1,ea1;
-
-    if ( entry->invalid || entry->rid != rid || (!entry->tc && entry->cl != cl ) ) {
+__is_tr_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
+{
+    uint64_t size, sa1, ea1;
+
+//    if ( entry->invalid || entry->rid != rid || (entry->cl != cl ) ) {
+    if ( entry->invalid || entry->rid != rid ) {
         return 0;
     }
-    size1=PSIZE(entry->ps);
-    sa1 = entry->vadr & ~(size1-1); // mask the low address bits
-    ea1 = sa1 + size1;
-    if ( (sva >= ea1 && ea1 != 0) || (eva <= sa1 && eva != 0) ) 
+    size = PSIZE(entry->ps);
+    sa1 = entry->vadr;
+    ea1 = sa1 + size -1;
+    eva -= 1;
+    if(sa1&(size-1))
+        while(1);
+    if ( (sva>ea1) || (sa1>eva) )
         return 0;
     else
         return 1;
@@ -103,9 +109,11 @@
 
 static void __rem_tr (thash_cb_t *hcb, thash_data_t *tr)
 {
+/*
     if ( hcb->remove_notifier ) {
         (hcb->remove_notifier)(hcb,tr);
     }
+*/
     tr->invalid = 1;
 }
 
@@ -142,7 +150,7 @@
     else {
         tr = &DTR(hcb,idx);
     }
-    if ( !INVALID_TLB(tr) ) {
+    if ( !INVALID_TR(tr) ) {
         __rem_tr(hcb, tr);
     }
     __set_tr (tr, insert, idx);
@@ -151,6 +159,7 @@
 /*
  * remove TR entry.
  */
+/*
 static void rem_tr(thash_cb_t *hcb,CACHE_LINE_TYPE cl, int idx)
 {
     thash_data_t *tr;
@@ -161,17 +170,18 @@
     else {
         tr = &DTR(hcb,idx);
     }
-    if ( !INVALID_TLB(tr) ) {
+    if ( !INVALID_TR(tr) ) {
         __rem_tr(hcb, tr);
     }
 }
-
+ */
 /*
  * Delete an thash entry in collision chain.
  *  prev: the previous entry.
  *  rem: the removed entry.
  */
-static void __rem_chain(thash_cb_t *hcb/*, thash_data_t *prev*/, thash_data_t *rem)
+/*
+static void __rem_chain(thash_cb_t *hcb, thash_data_t *prev, thash_data_t *rem)
 {
     //prev->next = rem->next;
     if ( hcb->remove_notifier ) {
@@ -179,6 +189,7 @@
     }
     cch_free (hcb, rem);
 }
+ */
 
 /*
  * Delete an thash entry leading collision chain.
@@ -187,15 +198,16 @@
 {
     thash_data_t *next=hash->next;
 
-    if ( hcb->remove_notifier ) {
+/*    if ( hcb->remove_notifier ) {
         (hcb->remove_notifier)(hcb,hash);
-    }
+    } */
     if ( next != NULL ) {
+        next->len=hash->len-1;
         *hash = *next;
         cch_free (hcb, next);
     }
     else {
-        INVALIDATE_HASH(hcb, hash);
+        INVALIDATE_HASH_HEADER(hcb, hash);
     }
 }
 
@@ -215,8 +227,8 @@
         num = NDTRS;
     }
     for ( i=0; i<num; i++ ) {
-        if ( !INVALID_ENTRY(hcb,&tr[i]) &&
-            __is_translated(&tr[i], rid, va, cl) )
+        if ( !INVALID_TR(&tr[i]) &&
+            __is_tr_translated(&tr[i], rid, va, cl) )
             return &tr[i];
     }
     return NULL;
@@ -227,6 +239,7 @@
  * Find overlap VHPT entry within current collision chain
  * base on internal priv info.
  */
+/*
 static inline thash_data_t* _vhpt_next_overlap_in_chain(thash_cb_t *hcb)
 {
     thash_data_t    *cch;
@@ -240,26 +253,27 @@
     }
     return NULL;
 }
-
+*/
 /*
  * Find overlap TLB/VHPT entry within current collision chain
  * base on internal priv info.
  */
+/*
 static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb)
 {
     thash_data_t    *cch;
     thash_internal_t *priv = &hcb->priv;
 
-    /* Find overlap TLB entry */
+    // Find overlap TLB entry
     for (cch=priv->cur_cch; cch; cch = cch->next) {
         if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr )  &&
-            __is_tlb_overlap(hcb, cch, priv->rid, priv->cl,
-                priv->_curva, priv->_eva) ) {
+            __is_translated( cch, priv->rid, priv->_curva, priv->cl)) {
             return cch;
         }
     }
     return NULL;
 }
+ */
 
 /*
  * Get the machine format of VHPT entry.
@@ -281,26 +295,190 @@
             thash_data_t *tlb, u64 va,
             thash_data_t *vhpt)
 {
-    u64 pages,mfn;
-    ia64_rr vrr;
-
+    u64 pages,mfn,padr,pte;
+//    ia64_rr vrr;
     ASSERT ( hcb->ht == THASH_VHPT );
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
-    pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
-    mfn = (unsigned long)(hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
-    if ( mfn == INVALID_MFN ) return 0;
-
+//    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
+    padr = tlb->ppn >>(tlb->ps-ARCH_PAGE_SHIFT)<<tlb->ps;
+    padr += va&((1UL<<tlb->ps)-1);
+    pte=lookup_domain_mpa(current->domain,padr);
+    if((pte>>56))
+        return 0;
     // TODO with machine discontinuous address space issue.
-    vhpt->etag =(unsigned long) (hcb->vs->tag_func)( hcb->pta, tlb->vadr);
+    vhpt->etag = ia64_ttag(va);
     //vhpt->ti = 0;
     vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
     vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
-    vhpt->ppn = mfn;
+    vhpt->ps = PAGE_SHIFT;
+    vhpt->ppn = (pte&((1UL<<IA64_MAX_PHYS_BITS)-(1UL<<PAGE_SHIFT)))>>ARCH_PAGE_SHIFT;
     vhpt->next = 0;
     return 1;
 }
 
-
+static void thash_remove_cch(thash_cb_t *hcb, thash_data_t *hash)
+{
+    thash_data_t *prev, *next;
+    prev = hash; next= hash->next;
+    while(next){
+       prev=next;
+       next=prev->next;
+       cch_free(hcb, prev);
+    }
+    hash->next = NULL;
+    hash->len = 0;
+}
+
+/*  vhpt only has entries with PAGE_SIZE page size */
+
+void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
+{
+    thash_data_t   vhpt_entry, *hash_table, *cch;
+//    ia64_rr vrr;
+
+    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
+        return;
+    //panic("Can't convert to machine VHPT entry\n");
+    }
+
+    hash_table = ia64_thash(va);
+    if( INVALID_VHPT(hash_table) ) {
+        *hash_table = vhpt_entry;
+        hash_table->next = 0;
+       return;
+    }
+
+    cch = hash_table;
+    while(cch){
+        if(cch->etag == vhpt_entry.etag){
+            if(cch->ppn == vhpt_entry.ppn)
+                return;
+            else
+                while(1);
+        }
+        cch = cch->next;
+    }
+    if(hash_table->len>=MAX_CCN_DEPTH){
+       thash_remove_cch(hcb, hash_table);
+       cch = cch_alloc(hcb);
+       *cch = *hash_table;
+        *hash_table = vhpt_entry;
+       hash_table->len = 1;
+        hash_table->next = cch;
+       return;
+    }
+       
+    // TODO: Add collision chain length limitation.
+     cch = __alloc_chain(hcb,entry);
+     if(cch == NULL){
+           *hash_table = vhpt_entry;
+            hash_table->next = 0;
+     }else{
+            *cch = *hash_table;
+            *hash_table = vhpt_entry;
+            hash_table->next = cch;
+           hash_table->len = cch->len + 1;
+           cch->len = 0;       
+//            if(hash_table->tag==hash_table->next->tag)
+//                while(1);
+
+    }
+    return /*hash_table*/;
+}
+
+/*
+ *   vhpt lookup
+ */
+
+thash_data_t * vhpt_lookup(u64 va)
+{
+    thash_data_t *hash;
+    u64 tag;
+    hash = ia64_thash(va);
+    tag = ia64_ttag(va);
+    while(hash){
+       if(hash->etag == tag)
+               return hash;
+        hash=hash->next;
+    }
+    return NULL;
+}
+
+
+/*
+ *  purge software guest tlb
+ */
+
+static void vtlb_purge(thash_cb_t *hcb, u64 va, u64 ps)
+{
+    thash_data_t *hash_table, *prev, *next;
+    u64 start, end, size, tag, rid;
+    ia64_rr vrr;
+    vrr=vmx_vcpu_rr(current, va);
+    rid = vrr.rid;
+    size = PSIZE(ps);
+    start = va & (-size);
+    end = start + size;
+    while(start < end){
+        hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
+//         tag = ia64_ttag(start);
+        if(!INVALID_TLB(hash_table)){
+       if(hash_table->etag == tag){
+            __rem_hash_head(hcb, hash_table);
+       }
+           else{
+           prev=hash_table;
+               next=prev->next;
+               while(next){
+                       if(next->etag == tag){
+                           prev->next=next->next;
+                           cch_free(hcb,next);
+                           hash_table->len--;
+                           break;
+                       }
+                       prev=next;
+                   next=next->next;
+           }
+       }
+        }
+           start += PAGE_SIZE;
+    }
+//    machine_tlb_purge(va, ps);
+}
+/*
+ *  purge VHPT and machine TLB
+ */
+
+static void vhpt_purge(thash_cb_t *hcb, u64 va, u64 ps)
+{
+    thash_data_t *hash_table, *prev, *next;
+    u64 start, end, size, tag;
+    size = PSIZE(ps);
+    start = va & (-size);
+    end = start + size;
+    while(start < end){
+       hash_table = ia64_thash(start);
+           tag = ia64_ttag(start);
+       if(hash_table->etag == tag ){
+            __rem_hash_head(hcb, hash_table);
+       }
+           else{
+           prev=hash_table;
+               next=prev->next;
+               while(next){
+                       if(next->etag == tag){
+                           prev->next=next->next;
+                           cch_free(hcb,next);
+                           hash_table->len--;
+                           break;
+                       }
+                       prev=next;
+                   next=next->next;
+           }
+       }
+           start += PAGE_SIZE;
+    }
+    machine_tlb_purge(va, ps);
+}
 /*
  * Insert an entry to hash table. 
  *    NOTES:
@@ -327,43 +505,62 @@
     entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
     entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
     rep_tr(hcb, entry, idx);
+//    thash_vhpt_insert(hcb->ts->vhpt, entry, va);
     return ;
 }
+
+
+/*
+ * Recycle all collisions chain in VTLB or VHPT.
+ *
+ */
+
+void thash_recycle_cch(thash_cb_t *hcb)
+{
+    thash_data_t    *hash_table;
+
+    hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
+    for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
+        thash_remove_cch(hcb,hash_table);
+    }
+}
+/*
 thash_data_t *vtlb_alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
 {
     thash_data_t *cch;
-    
+
     cch = cch_alloc(hcb);
     if(cch == NULL){
-        thash_purge_all(hcb);
+        thash_recycle_cch(hcb);
+        cch = cch_alloc(hcb);
     }
     return cch;
 }
- 
+*/
 
 thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
 {
     thash_data_t *cch;
-    
+
     cch = cch_alloc(hcb);
     if(cch == NULL){
         // recycle
-        if ( hcb->recycle_notifier ) {
-                hcb->recycle_notifier(hcb,(u64)entry);
-        }
-        thash_purge_all(hcb);
-//        cch = cch_alloc(hcb);
+//        if ( hcb->recycle_notifier ) {
+//                hcb->recycle_notifier(hcb,(u64)entry);
+//        }
+        thash_recycle_cch(hcb);
+        cch = cch_alloc(hcb);
     }
     return cch;
 }
- 
+
 /*
  * Insert an entry into hash TLB or VHPT.
  * NOTES:
  *  1: When inserting VHPT to thash, "va" is a must covered
  *  address by the inserted machine VHPT entry.
  *  2: The format of entry is always in TLB.
- *  3: The caller need to make sure the new entry will not overlap 
+ *  3: The caller need to make sure the new entry will not overlap
  *     with any existed entry.
  */
 void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
@@ -372,16 +569,32 @@
     int flag;
     ia64_rr vrr;
     u64 gppn;
-    u64 ppns, ppne;
-
-    hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, va);
-    if( INVALID_ENTRY(hcb, hash_table) ) {
+    u64 ppns, ppne, tag;
+    vrr=vmx_vcpu_rr(current, va);
+    if (vrr.ps != entry->ps) {
+//        machine_tlb_insert(hcb->vcpu, entry);
+       panic("not preferred ps with va: 0x%lx\n", va);
+       return;
+    }
+    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
+    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
+    hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
+    entry->etag = tag;
+    if( INVALID_TLB(hash_table) ) {
         *hash_table = *entry;
         hash_table->next = 0;
     }
+    else if (hash_table->len>=MAX_CCN_DEPTH){
+        thash_remove_cch(hcb, hash_table);
+        cch = cch_alloc(hcb);
+        *cch = *hash_table;
+        *hash_table = *entry;
+        hash_table->len = 1;
+        hash_table->next = cch;
+    }
     else {
         // TODO: Add collision chain length limitation.
-        cch = vtlb_alloc_chain(hcb,entry);
+        cch = __alloc_chain(hcb,entry);
         if(cch == NULL){
             *hash_table = *entry;
             hash_table->next = 0;
@@ -389,22 +602,17 @@
             *cch = *hash_table;
             *hash_table = *entry;
             hash_table->next = cch;
-        }
-    }
+            hash_table->len = cch->len + 1;
+            cch->len = 0;
+        }
+    }
+#if 0
     if(hcb->vcpu->domain->domain_id==0){
        thash_insert(hcb->ts->vhpt, entry, va);
         return;
     }
-
-#if 1
-    vrr=vmx_vcpu_rr(current, va);
-    if (vrr.ps != entry->ps) {
-        machine_tlb_insert(hcb->vcpu, entry);
-       printk("not preferred ps with va: 0x%lx\n", va);
-       return;
-    }
-#endif 
-
+#endif
+/*
     flag = 1;
     gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
     ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
@@ -413,46 +621,18 @@
         flag = 0;
     if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag))
        thash_insert(hcb->ts->vhpt, entry, va);
+*/
     return ;
 }
 
-static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
-{
-    thash_data_t   vhpt_entry, *hash_table, *cch;
-
-    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
-        panic("Can't convert to machine VHPT entry\n");
-    }
-    hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, va);
-    if( INVALID_ENTRY(hcb, hash_table) ) {
-        *hash_table = vhpt_entry;
-        hash_table->next = 0;
-    }
-    else {
-        // TODO: Add collision chain length limitation.
-        cch = __alloc_chain(hcb,entry);
-        if(cch == NULL){
-            *hash_table = vhpt_entry;
-            hash_table->next = 0;
-        }else{
-            *cch = *hash_table;
-            *hash_table = vhpt_entry;
-            hash_table->next = cch;
-            if(hash_table->tag==hash_table->next->tag)
-                while(1);
-
-        }
-
-    }
-    return /*hash_table*/;
-}
-
+
+/*
 void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
-    //thash_data_t    *hash_table;
+    thash_data_t    *hash_table;
     ia64_rr vrr;
     
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
+    vrr = vmx_vcpu_rr(hcb->vcpu,entry->vadr);
     if ( entry->ps != vrr.ps && entry->tc ) {
         panic("Not support for multiple page size now\n");
     }
@@ -461,11 +641,13 @@
     (hcb->ins_hash)(hcb, entry, va);
     
 }
-
+*/
+/*
 static void rem_thash(thash_cb_t *hcb, thash_data_t *entry)
 {
     thash_data_t    *hash_table, *p, *q;
     thash_internal_t *priv = &hcb->priv;
+    int idx;
 
     hash_table = priv->hash_base;
     if ( hash_table == entry ) {
@@ -481,6 +663,7 @@
 //            if ( PURGABLE_ENTRY(hcb,q ) ) {
                 p->next = q->next;
                 __rem_chain(hcb, entry);
+                hash_table->len--;
 //            }
             return ;
         }
@@ -488,16 +671,20 @@
     }
     panic("Entry not existed or bad sequence\n");
 }
-
+*/
+/*
 static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
 {
+    thash_data_t    *hash_table, *p, *q;
+    thash_internal_t *priv = &hcb->priv;
+    int idx;
     
     if ( !entry->tc ) {
         return rem_tr(hcb, entry->cl, entry->tr_idx);
     }
     rem_thash(hcb, entry);
 }    
-
+*/
 int   cch_depth=0;
 /*
  * Purge the collision chain starting from cch.
@@ -505,6 +692,7 @@
  *     For those UN-Purgable entries(FM), this function will return
  * the head of left collision chain.
  */
+/*
 static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch)
 {
     thash_data_t *next;
@@ -536,10 +724,11 @@
  *  hash: The head of collision chain (hash table)
  *
  */
+/*
 static void thash_rem_line(thash_cb_t *hcb, thash_data_t *hash)
 {
     if ( INVALID_ENTRY(hcb, hash) ) return;
-    
+
     if ( hash->next ) {
         cch_depth = 0;
         hash->next = thash_rem_cch(hcb, hash->next);
@@ -549,6 +738,7 @@
         __rem_hash_head(hcb, hash);
     }
 }
+ */
 
 /*
  * Find an overlap entry in hash table and its collision chain.
@@ -563,26 +753,31 @@
  *    NOTES:
  *
  */
-thash_data_t *thash_find_overlap(thash_cb_t *hcb, 
+
+/*
+thash_data_t *thash_find_overlap(thash_cb_t *hcb,
             thash_data_t *in, search_section_t s_sect)
 {
-    return (hcb->find_overlap)(hcb, in->vadr, 
+    return (hcb->find_overlap)(hcb, in->vadr,
             PSIZE(in->ps), in->rid, in->cl, s_sect);
 }
-
-static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb, 
+*/
+
+/*
+static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
         u64 va, u64 size, int rid, char cl, search_section_t s_sect)
 {
     thash_data_t    *hash_table;
     thash_internal_t *priv = &hcb->priv;
+    u64     tag;
     ia64_rr vrr;
 
     priv->_curva = va & ~(size-1);
     priv->_eva = priv->_curva + size;
     priv->rid = rid;
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
+    vrr = vmx_vcpu_rr(hcb->vcpu,va);
     priv->ps = vrr.ps;
-    hash_table =(thash_data_t *)(hcb->hash_func)(hcb->pta, priv->_curva);
+    hash_table = vsa_thash(hcb->pta, priv->_curva, vrr.rrval, &tag);
     priv->s_sect = s_sect;
     priv->cl = cl;
     priv->_tr_idx = 0;
@@ -590,8 +785,10 @@
     priv->cur_cch = hash_table;
     return (hcb->next_overlap)(hcb);
 }
-
-static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb, 
+*/
+
+/*
+static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb,
         u64 va, u64 size, int rid, char cl, search_section_t s_sect)
 {
     thash_data_t    *hash_table;
@@ -602,17 +799,43 @@
     priv->_curva = va & ~(size-1);
     priv->_eva = priv->_curva + size;
     priv->rid = rid;
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
+    vrr = vmx_vcpu_rr(hcb->vcpu,va);
     priv->ps = vrr.ps;
-    hash_table = (thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva);
-    tag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, priv->_curva);
+    hash_table = ia64_thash(priv->_curva);
+    tag = ia64_ttag(priv->_curva);
     priv->tag = tag;
     priv->hash_base = hash_table;
     priv->cur_cch = hash_table;
     return (hcb->next_overlap)(hcb);
 }
-
-
+*/
+
+
+thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl)
+{
+    thash_data_t    *tr;
+    int  i,num;
+    u64 end;
+
+    if (cl == ISIDE_TLB ) {
+        num = NITRS;
+        tr = &ITR(hcb,0);
+    }
+    else {
+        num = NDTRS;
+        tr = &DTR(hcb,0);
+    }
+    end=data->vadr + PSIZE(data->ps);
+    for (i=0; i<num; i++ ) {
+        if ( __is_tr_overlap(hcb, &tr[i], data->rid, cl, data->vadr, end )) {
+            return &tr[i];
+        }
+    }
+    return NULL;
+}
+
+
+/*
 static thash_data_t *vtr_find_next_overlap(thash_cb_t *hcb)
 {
     thash_data_t    *tr;
@@ -628,25 +851,27 @@
         tr = &DTR(hcb,0);
     }
     for (; priv->_tr_idx < num; priv->_tr_idx ++ ) {
-        if ( __is_tlb_overlap(hcb, &tr[(unsigned)priv->_tr_idx],
+        if ( __is_tr_overlap(hcb, &tr[priv->_tr_idx],
                 priv->rid, priv->cl,
                 priv->_curva, priv->_eva) ) {
-            return &tr[(unsigned)priv->_tr_idx++];
+            return &tr[priv->_tr_idx++];
         }
     }
     return NULL;
 }
+*/
 
 /*
  * Similar with vtlb_next_overlap but find next entry.
  *    NOTES:
  *  Intermediate position information is stored in hcb->priv.
  */
+/*
 static thash_data_t *vtlb_next_overlap(thash_cb_t *hcb)
 {
     thash_data_t    *ovl;
     thash_internal_t *priv = &hcb->priv;
-    u64 rr_psize;
+    u64 addr,rr_psize,tag;
     ia64_rr vrr;
 
     if ( priv->s_sect.tr ) {
@@ -655,7 +880,7 @@
         priv->s_sect.tr = 0;
     }
     if ( priv->s_sect.v == 0 ) return NULL;
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
+    vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva);
     rr_psize = PSIZE(vrr.ps);
 
     while ( priv->_curva < priv->_eva ) {
@@ -667,20 +892,23 @@
             }
         }
         priv->_curva += rr_psize;
-        priv->hash_base = (thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva);
+        priv->hash_base = vsa_thash( hcb->pta, priv->_curva, vrr.rrval, &tag);
         priv->cur_cch = priv->hash_base;
     }
     return NULL;
 }
-
+ */
+
+
+/*
 static thash_data_t *vhpt_next_overlap(thash_cb_t *hcb)
 {
     thash_data_t    *ovl;
     thash_internal_t *priv = &hcb->priv;
-    u64 rr_psize;
+    u64 addr,rr_psize;
     ia64_rr vrr;
 
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
+    vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva);
     rr_psize = PSIZE(vrr.ps);
 
     while ( priv->_curva < priv->_eva ) {
@@ -692,13 +920,13 @@
             }
         }
         priv->_curva += rr_psize;
-        priv->hash_base =(thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva);
-        priv->tag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, priv->_curva);
+        priv->hash_base = ia64_thash(priv->_curva);
+        priv->tag = ia64_ttag(priv->_curva);
         priv->cur_cch = priv->hash_base;
     }
     return NULL;
 }
-
+*/
 
 /*
  * Find and purge overlap entries in hash table and its collision chain.
@@ -710,7 +938,7 @@
  *    NOTES:
  *
  */
-void thash_purge_entries(thash_cb_t *hcb, 
+void thash_purge_entries(thash_cb_t *hcb,
             thash_data_t *in, search_section_t p_sect)
 {
     return thash_purge_entries_ex(hcb, in->rid, in->vadr,
@@ -718,30 +946,33 @@
 }
 
 void thash_purge_entries_ex(thash_cb_t *hcb,
-            u64 rid, u64 va, u64 ps, 
-            search_section_t p_sect, 
+            u64 rid, u64 va, u64 ps,
+            search_section_t p_sect,
             CACHE_LINE_TYPE cl)
 {
     thash_data_t    *ovl;
 
-    ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
+/*    ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
     while ( ovl != NULL ) {
         (hcb->rem_hash)(hcb, ovl);
         ovl = (hcb->next_overlap)(hcb);
     };
+ */
+    vtlb_purge(hcb, va, ps);
+    vhpt_purge(hcb->ts->vhpt, va, ps);
 }
 
 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  *    Notes: Only TC entry can purge and insert.
  */
-void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in)
+void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va)
 {
     thash_data_t    *ovl;
     search_section_t sections;
 
 #ifdef   XEN_DEBUGGER
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr);
+    vrr = vmx_vcpu_rr(hcb->vcpu,in->vadr);
        if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
                panic ("Oops, wrong call for purge_and_insert\n");
                return;
@@ -751,10 +982,14 @@
     in->ppn = PAGEALIGN(in->ppn, in->ps-12);
     sections.tr = 0;
     sections.tc = 1;
+/*
     ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
                                 in->rid, in->cl, sections);
     if(ovl)
         (hcb->rem_hash)(hcb, ovl);
+ */
+    vtlb_purge(hcb, va, in->ps);
+    vhpt_purge(hcb->ts->vhpt, va, in->ps);
 #ifdef   XEN_DEBUGGER
     ovl = (hcb->next_overlap)(hcb);
     if ( ovl ) {
@@ -762,7 +997,9 @@
                return;
     }
 #endif
-    (hcb->ins_hash)(hcb, in, in->vadr);
+    if(in->ps!=PAGE_SHIFT)
+        vtlb_insert(hcb, in, va);
+    thash_vhpt_insert(hcb->ts->vhpt, in, va);
 }
 /*
  * Purge one hash line (include the entry in hash table).
@@ -771,6 +1008,7 @@
  *  hash: The head of collision chain (hash table)
  *
  */
+/*
 static void thash_purge_line(thash_cb_t *hcb, thash_data_t *hash)
 {
     if ( INVALID_ENTRY(hcb, hash) ) return;
@@ -784,6 +1022,16 @@
     // Then hash table itself.
     INVALIDATE_HASH(hcb, hash);
 }
+*/
+
+
+
+
+
+
+
+
+
 /*
  * Purge all TCs or VHPT entries including those in Hash table.
  *
@@ -792,8 +1040,10 @@
 // TODO: add sections.
 void thash_purge_all(thash_cb_t *hcb)
 {
-    thash_data_t    *hash_table;
-    
+    thash_data_t    *hash_table, *entry;
+    thash_cb_t  *vhpt;
+    u64 i, start, end;
+
 #ifdef  VTLB_DEBUG
        extern u64  sanity_check;
     static u64 statistics_before_purge_all=0;
@@ -802,18 +1052,35 @@
         check_vtlb_sanity(hcb);
     }
 #endif
+    ASSERT ( hcb->ht == THASH_TLB );
 
     hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
     for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
-        thash_purge_line(hcb, hash_table);
-    }
-    if(hcb->ht== THASH_TLB) {
-        hcb = hcb->ts->vhpt;
-        hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
-        for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
-            thash_purge_line(hcb, hash_table);
-        }
-    }
+        INVALIDATE_TLB_HEADER(hash_table);
+    }
+    cch_mem_init (hcb);
+
+    vhpt = hcb->ts->vhpt;
+    hash_table = (thash_data_t*)((u64)vhpt->hash + vhpt->hash_sz);
+    for (--hash_table;(u64)hash_table >= (u64)vhpt->hash;hash_table--) {
+        INVALIDATE_VHPT_HEADER(hash_table);
+    }
+    cch_mem_init (vhpt);
+    
+/*
+    entry = &hcb->ts->itr[0];
+    for(i=0; i< (NITRS+NDTRS); i++){
+        if(!INVALID_TLB(entry)){
+            start=entry->vadr & (-PSIZE(entry->ps));
+            end = start + PSIZE(entry->ps);
+            while(start<end){
+                thash_vhpt_insert(vhpt, entry, start);
+                start += PAGE_SIZE;
+            }
+        }
+        entry++;
+    }
+*/
     local_flush_tlb_all();
 }
 
@@ -836,22 +1103,24 @@
             CACHE_LINE_TYPE cl)
 {
     thash_data_t    *hash_table, *cch;
+    u64     tag;
     ia64_rr vrr;
    
-    ASSERT ( hcb->ht == THASH_VTLB );
+    ASSERT ( hcb->ht == THASH_TLB );
     
     cch = __vtr_lookup(hcb, rid, va, cl);;
     if ( cch ) return cch;
 
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
-    hash_table = (thash_data_t *)(hcb->hash_func)( hcb->pta, va);
+    vrr = vmx_vcpu_rr(hcb->vcpu,va);
+    hash_table = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
 
     if ( INVALID_ENTRY(hcb, hash_table ) )
         return NULL;
 
         
     for (cch=hash_table; cch; cch = cch->next) {
-        if ( __is_translated(cch, rid, va, cl) )
+//        if ( __is_translated(cch, rid, va, cl) )
+        if(cch->etag == tag)
             return cch;
     }
     return NULL;
@@ -864,6 +1133,7 @@
  *          1: failure
  *          0: success
  */
+/*
 int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int 
lock)
 {
        thash_data_t    *ovl;
@@ -893,6 +1163,7 @@
        }
        return 1;
 }
+*/
 
 /*
  * Notifier when TLB is deleted from hash table and its collision chain.
@@ -904,16 +1175,17 @@
  *  2: The format of entry is always in TLB.
  *
  */
-void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
-{
+//void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
+//{
+//    vhpt_purge(hcb->ts->vhpt,entry->vadr,entry->ps);
 //    thash_cb_t  *vhpt;
-    search_section_t    s_sect;
     
-    s_sect.v = 0;
-    thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
-    machine_tlb_purge(entry->vadr, entry->ps);
-    return;
-}
+//    search_section_t    s_sect;
+    
+//    s_sect.v = 0;
+//    thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
+//    machine_tlb_purge(entry->vadr, entry->ps);
+//}
 
 /*
  * Initialize internal control data before service.
@@ -928,30 +1200,29 @@
     hcb->pta.vf = 1;
     hcb->pta.ve = 1;
     hcb->pta.size = sz;
-    hcb->get_rr_fn = vmmu_get_rr;
+//    hcb->get_rr_fn = vmmu_get_rr;
     ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 );
     if ( hcb->ht == THASH_TLB ) {
-        hcb->remove_notifier =  tlb_remove_notifier;
-        hcb->find_overlap = vtlb_find_overlap;
-        hcb->next_overlap = vtlb_next_overlap;
-        hcb->rem_hash = rem_vtlb;
-        hcb->ins_hash = vtlb_insert;
+//        hcb->remove_notifier =  NULL;        //tlb_remove_notifier;
+//        hcb->find_overlap = vtlb_find_overlap;
+//        hcb->next_overlap = vtlb_next_overlap;
+//        hcb->rem_hash = rem_vtlb;
+//        hcb->ins_hash = vtlb_insert;
         __init_tr(hcb);
     }
     else {
-        hcb->remove_notifier =  NULL;
-        hcb->find_overlap = vhpt_find_overlap;
-        hcb->next_overlap = vhpt_next_overlap;
-        hcb->rem_hash = rem_thash;
-        hcb->ins_hash = vhpt_insert;
+//        hcb->remove_notifier =  NULL;
+//        hcb->find_overlap = vhpt_find_overlap;
+//        hcb->next_overlap = vhpt_next_overlap;
+//        hcb->rem_hash = rem_thash;
+//        hcb->ins_hash = thash_vhpt_insert;
     }
     hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
-    
+
     for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
-        INVALIDATE_HASH(hcb,hash_table);
-    }
-}
-#define VTLB_DEBUG
+        INVALIDATE_HASH_HEADER(hcb,hash_table);
+    }
+}
 #ifdef  VTLB_DEBUG
 static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
 u64  sanity_check=0;
@@ -961,7 +1232,7 @@
     thash_data_t    *ovl;
     search_section_t s_sect;
     u64     num=0;
-    
+
     s_sect.v = 0;
     for (cch=hash; cch; cch=cch->next) {
         ovl = thash_find_overlap(vhpt, cch, s_sect);
@@ -991,7 +1262,7 @@
     search_section_t s_sect;
     thash_cb_t *vhpt = vtlb->ts->vhpt;
     u64   invalid_ratio;
-    
+ 
     if ( sanity_check == 0 ) return;
     sanity_check --;
     s_sect.v = 0;
@@ -1012,9 +1283,9 @@
     for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
        cch_length_statistics[i] = 0;
     }
-    
+
     local_irq_save(psr);
-    
+
     hash = vhpt->hash;
     for (i=0; i < hash_num; i++) {
         if ( !INVALID_ENTRY(vhpt, hash) ) {
@@ -1097,12 +1368,12 @@
     static u64  dump_vtlb=0;
     thash_data_t  *hash, *cch, *tr;
     u64     hash_num,i;
-    
+
     if ( dump_vtlb == 0 ) return;
     dump_vtlb --;
     hash_num = vtlb->hash_sz / sizeof(thash_data_t);
     hash = vtlb->hash;
-    
+
     printf("Dump vTC\n");
     for ( i = 0; i < hash_num; i++ ) {
         if ( !INVALID_ENTRY(vtlb, hash) ) {
diff -r e58ff5fd3550 -r cfe20f41f043 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Tue Feb 28 20:18:08 2006
+++ b/xen/arch/ia64/xen/domain.c        Wed Mar  1 15:29:00 2006
@@ -484,6 +484,9 @@
                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
        }
        else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+    if((physaddr>>PAGE_SHIFT)<max_page){
+        *(mpt_table + (physaddr>>PAGE_SHIFT))=(mpaddr>>PAGE_SHIFT);
+    }
 }
 #if 0
 /* map a physical address with specified I/O flag */
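
The assign_domain_page() hunk above also records the reverse mapping in the machine-to-physical table, so that gmfn_to_mfn_foreign() callers see a consistent M2P view for non-contiguous domains. A stand-alone sketch of that bookkeeping; PAGE_SHIFT == 14 (16KB ia64 Xen pages) is an assumption here:

#define PAGE_SHIFT 14                 /* assumption: 16KB Xen pages on ia64 */

static unsigned long *mpt_table;      /* M2P: indexed by mfn, holds gpfn */
static unsigned long max_page;        /* number of machine frames tracked */

/* Record that machine frame (physaddr >> PAGE_SHIFT) now backs guest
 * frame (mpaddr >> PAGE_SHIFT), guarding against frames past the table. */
static void m2p_record(unsigned long physaddr, unsigned long mpaddr)
{
    unsigned long mfn = physaddr >> PAGE_SHIFT;

    if (mfn < max_page)
        mpt_table[mfn] = mpaddr >> PAGE_SHIFT;
}
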
diff -r e58ff5fd3550 -r cfe20f41f043 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Tue Feb 28 20:18:08 2006
+++ b/xen/include/asm-ia64/config.h     Wed Mar  1 15:29:00 2006
@@ -67,7 +67,7 @@
 extern unsigned long xenheap_phys_end;
 extern unsigned long xen_pstart;
 extern unsigned long xenheap_size;
-extern struct domain *dom0;
+//extern struct domain *dom0;
 extern unsigned long dom0_start;
 extern unsigned long dom0_size;
 
diff -r e58ff5fd3550 -r cfe20f41f043 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Tue Feb 28 20:18:08 2006
+++ b/xen/include/asm-ia64/mm.h Wed Mar  1 15:29:00 2006
@@ -133,6 +133,8 @@
 extern void __init init_frametable(void);
 #endif
 void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
+
+extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
 
 static inline void put_page(struct page_info *page)
 {
@@ -215,8 +217,8 @@
 #endif
 
 // prototype of misc memory stuff
-unsigned long __get_free_pages(unsigned int mask, unsigned int order);
-void __free_pages(struct page *page, unsigned int order);
+//unsigned long __get_free_pages(unsigned int mask, unsigned int order);
+//void __free_pages(struct page *page, unsigned int order);
 void *pgtable_quicklist_alloc(void);
 void pgtable_quicklist_free(void *pgtable_entry);
 
@@ -436,12 +438,22 @@
 
 /* Return I/O type if true */
 #define __gpfn_is_io(_d, gpfn)                         \
-       (__gmfn_valid(_d, gpfn) ?                       \
-       (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) : 0)
+({                                          \
+    u64 pte, ret=0;                                \
+    pte=lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT));      \
+    if(!(pte&GPFN_INV_MASK))        \
+        ret = pte & GPFN_IO_MASK;        \
+    ret;                \
+})
 
 #define __gpfn_is_mem(_d, gpfn)                                \
-       (__gmfn_valid(_d, gpfn) ?                       \
-       ((lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
+({                                          \
+    u64 pte, ret=0;                                \
+    pte=lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT));      \
+    if((!(pte&GPFN_INV_MASK))&&((pte & GPFN_IO_MASK)==GPFN_MEM))   \
+        ret = 1;             \
+    ret;                \
+})
 
 
 #define __gpa_to_mpa(_d, gpa)   \
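
The rewritten predicates above use statement expressions so that a pte flagged with GPFN_INV_MASK is reported as neither I/O nor memory, instead of relying on __gmfn_valid(). The same logic as inline functions; the constants and the lookup prototype below are reduced stand-ins for illustration, not the real headers:

#define GPFN_INV_MASK  (1UL << 63)    /* assumption: invalid-entry flag */
#define GPFN_IO_MASK   (7UL << 60)    /* assumption: I/O type field */
#define GPFN_MEM       (0UL << 60)    /* assumption: ordinary RAM type */

static unsigned long lookup_pte(unsigned long gpfn);  /* stands in for
                                                         lookup_domain_mpa() */

static inline unsigned long gpfn_is_io(unsigned long gpfn)
{
    unsigned long pte = lookup_pte(gpfn);
    /* an invalid mapping must not be mistaken for an I/O page */
    return (pte & GPFN_INV_MASK) ? 0 : (pte & GPFN_IO_MASK);
}

static inline int gpfn_is_mem(unsigned long gpfn)
{
    unsigned long pte = lookup_pte(gpfn);
    return !(pte & GPFN_INV_MASK) && (pte & GPFN_IO_MASK) == GPFN_MEM;
}
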
diff -r e58ff5fd3550 -r cfe20f41f043 xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h       Tue Feb 28 20:18:08 2006
+++ b/xen/include/asm-ia64/vcpu.h       Wed Mar  1 15:29:00 2006
@@ -104,7 +104,6 @@
 extern IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val);
 extern IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val);
 /* interval timer registers */
-extern IA64FAULT vcpu_set_itm(VCPU *vcpu,UINT64 val);
 extern IA64FAULT vcpu_set_itc(VCPU *vcpu,UINT64 val);
 extern UINT64 vcpu_timer_pending_early(VCPU *vcpu);
 /* debug breakpoint registers */
diff -r e58ff5fd3550 -r cfe20f41f043 xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h       Tue Feb 28 20:18:08 2006
+++ b/xen/include/asm-ia64/vmmu.h       Wed Mar  1 15:29:00 2006
@@ -22,13 +22,27 @@
 
 #ifndef XEN_TLBthash_H
 #define XEN_TLBthash_H
+
+#define         MAX_CCN_DEPTH           15       // collision chain depth
+#define         VCPU_VTLB_SHIFT          (20)    // 1MB for VTLB
+#define         VCPU_VTLB_SIZE           (1UL<<VCPU_VTLB_SHIFT)
+#define         VCPU_VTLB_ORDER          (VCPU_VTLB_SHIFT - PAGE_SHIFT)
+#define         VCPU_VHPT_SHIFT          (24)    // 16MB for VHPT
+#define         VCPU_VHPT_SIZE           (1UL<<VCPU_VHPT_SHIFT)
+#define         VCPU_VHPT_ORDER          (VCPU_VHPT_SHIFT - PAGE_SHIFT)
+
+#define         PTA_BASE_SHIFT          (15)
+
+
+
+#ifndef __ASSEMBLY__
 
 #include <xen/config.h>
 #include <xen/types.h>
 #include <public/xen.h>
 #include <asm/tlb.h>
 #include <asm/regionreg.h>
-
+#include <asm/vmx_mm_def.h>
 //#define         THASH_TLB_TR            0
 //#define         THASH_TLB_TC            1
 
@@ -39,7 +53,15 @@
 
 /*
  * Next bit definition must be same with THASH_TLB_XX
- */
+#define         PTA_BASE_SHIFT          (15)
+ */
+
+
+
+
+#define HIGH_32BITS(x)  bits(x,32,63)
+#define LOW_32BITS(x)   bits(x,0,31)
+
 typedef union search_section {
         struct {
                 u32 tr : 1;
@@ -49,15 +71,6 @@
         u32     v;
 } search_section_t;
 
-#define         MAX_CCN_DEPTH           4       // collision chain depth
-#define         VCPU_TLB_SHIFT          (22)
-#define         VCPU_TLB_SIZE           (1UL<<VCPU_TLB_SHIFT)
-#define         VCPU_TLB_ORDER          VCPU_TLB_SHIFT - PAGE_SHIFT
-#define         PTA_BASE_SHIFT          (15)
-
-#ifndef __ASSEMBLY__
-#define HIGH_32BITS(x)  bits(x,32,63)
-#define LOW_32BITS(x)   bits(x,0,31)
 
 typedef enum {
         ISIDE_TLB=0,
@@ -77,18 +90,21 @@
             u64 ppn  : 38; // 12-49
             u64 rv2  :  2; // 50-51
             u64 ed   :  1; // 52
-            u64 ig1  :  11; //53-63
+            u64 ig1  :  3; // 53-55
+            u64 len  :  4; // 56-59
+            u64 ig2  :  4; // 60-63
         };
         struct {
             u64 __rv1 : 53;    // 0-52
+            u64 contiguous : 1; //53
+            u64 tc : 1;     // 54 TR or TC
+            CACHE_LINE_TYPE cl : 1; // 55 I side or D side cache line
             // next extension to ig1, only for TLB instance
-            u64 tc : 1;     // 53 TR or TC
-            u64 locked  : 1;   // 54 entry locked or not
-            CACHE_LINE_TYPE cl : 1; // I side or D side cache line
-            u64 nomap : 1;   // entry cann't be inserted into machine TLB.
-            u64 __ig1  :  5; // 56-61
-            u64 checked : 1; // for VTLB/VHPT sanity check
-            u64 invalid : 1; // invalid entry
+            u64 __ig1  :  4; // 56-59
+            u64 locked  : 1;   // 60 entry locked or not
+            u64 nomap : 1;   // 61 entry can't be inserted into machine TLB.
+            u64 checked : 1; // 62 for VTLB/VHPT sanity check
+            u64 invalid : 1; // 63 invalid entry
         };
         u64 page_flags;
     };                  // same for VHPT and TLB
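
The repacked layout above keeps the insertion-relevant bits (contiguous/tc/cl) in the ignored PTE bits 53-55, frees 56-59 for the new len field, and moves the purely software bookkeeping flags up to 60-63. A reduced overlay of the two views of the same word, for illustration only:

#include <stdint.h>

typedef union {
    struct {                         /* architectural PTE view */
        uint64_t arch : 53;          /* bits 0-52: p .. ed */
        uint64_t ig1  : 3;           /* 53-55: ignored by hardware */
        uint64_t len  : 4;           /* 56-59 */
        uint64_t ig2  : 4;           /* 60-63 */
    };
    struct {                         /* software view of the same word */
        uint64_t rv1        : 53;
        uint64_t contiguous : 1;     /* 53: part of a contiguous run */
        uint64_t tc         : 1;     /* 54: TC (vs. TR) entry */
        uint64_t cl         : 1;     /* 55: I-side or D-side line */
        uint64_t ig         : 4;     /* 56-59 */
        uint64_t locked     : 1;     /* 60 */
        uint64_t nomap      : 1;     /* 61: not insertable into machine TLB */
        uint64_t checked    : 1;     /* 62: sanity-check mark */
        uint64_t invalid    : 1;     /* 63: entry invalid */
    };
    uint64_t page_flags;
} tlb_flags_t;
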
@@ -128,10 +144,37 @@
     };
 } thash_data_t;
 
+#define INVALIDATE_VHPT_HEADER(hdata)   \
+{      ((hdata)->page_flags)=0;        \
+       ((hdata)->ti)=1;        \
+       ((hdata)->next)=0; }
+
+#define INVALIDATE_TLB_HEADER(hdata)   \
+{      ((hdata)->page_flags)=0;        \
+       ((hdata)->ti)=1;                \
+       ((hdata)->next)=0; }
+
 #define INVALID_VHPT(hdata)     ((hdata)->ti)
-#define INVALID_TLB(hdata)      ((hdata)->invalid)
-#define INVALID_ENTRY(hcb, hdata)                       \
-        ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata))
+#define INVALID_TLB(hdata)      ((hdata)->ti)
+#define INVALID_TR(hdata)      ((hdata)->invalid)
+#define INVALID_ENTRY(hcb, hdata)       INVALID_VHPT(hdata)
+
+/*        ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata)) */
+
+
+/*
+ * Architecture ppn is in 4KB unit while XEN
+ * page may be different(1<<PAGE_SHIFT).
+ */
+static inline u64 arch_to_xen_ppn(u64 appn)
+{
+    return (appn >>(PAGE_SHIFT-ARCH_PAGE_SHIFT));
+}
+
+static inline u64 xen_to_arch_ppn(u64 xppn)
+{
+    return (xppn <<(PAGE_SHIFT- ARCH_PAGE_SHIFT));
+}
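
Because the architected ppn field always counts 4KB units while a Xen page is 1 << PAGE_SHIFT bytes, the two helpers above just shift by the difference. A worked example, assuming ARCH_PAGE_SHIFT == 12 and PAGE_SHIFT == 14:

#define ARCH_PAGE_SHIFT 12           /* architectural 4KB ppn unit */
#define PAGE_SHIFT      14           /* assumption: 16KB Xen pages */

/* arch ppn 0x1001 (4KB units) -> xen ppn 0x400: the low two bits are
 * dropped, which is why get_mfn() ORs the sub-page bits of the gpfn
 * back into the result. */
static unsigned long xppn = 0x1001UL >> (PAGE_SHIFT - ARCH_PAGE_SHIFT);

/* xen ppn 0x400 -> arch ppn 0x1000 (start of the 16KB frame) */
static unsigned long appn = 0x400UL << (PAGE_SHIFT - ARCH_PAGE_SHIFT);
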
 
 typedef enum {
         THASH_TLB=0,
@@ -166,11 +209,11 @@
         struct thash_cb  *vhpt;
 } tlb_special_t;
 
-typedef struct vhpt_cb {
+//typedef struct vhpt_cb {
         //u64     pta;    // pta value.
-        GET_MFN_FN      *get_mfn;
-        TTAG_FN         *tag_func;
-} vhpt_special;
+//        GET_MFN_FN      *get_mfn;
+//        TTAG_FN         *tag_func;
+//} vhpt_special;
 
 typedef struct thash_internal {
         thash_data_t *hash_base;
@@ -198,36 +241,38 @@
         u64     hash_sz;        // size of above data.
         void    *cch_buf;       // base address of collision chain.
         u64     cch_sz;         // size of above data.
-        THASH_FN        *hash_func;
-        GET_RR_FN       *get_rr_fn;
-        RECYCLE_FN      *recycle_notifier;
+//        THASH_FN        *hash_func;
+//        GET_RR_FN       *get_rr_fn;
+//        RECYCLE_FN      *recycle_notifier;
         thash_cch_mem_t *cch_freelist;
         struct vcpu *vcpu;
         PTA     pta;
         /* VTLB/VHPT common information */
-        FIND_OVERLAP_FN *find_overlap;
-        FIND_NEXT_OVL_FN *next_overlap;
-        REM_THASH_FN    *rem_hash; // remove hash entry.
-        INS_THASH_FN    *ins_hash; // insert hash entry.
-        REM_NOTIFIER_FN *remove_notifier;
+//        FIND_OVERLAP_FN *find_overlap;
+//        FIND_NEXT_OVL_FN *next_overlap;
+//        REM_THASH_FN    *rem_hash; // remove hash entry.
+//        INS_THASH_FN    *ins_hash; // insert hash entry.
+//        REM_NOTIFIER_FN *remove_notifier;
         /* private information */
-        thash_internal_t  priv;
+//        thash_internal_t  priv;
         union {
                 tlb_special_t  *ts;
-                vhpt_special   *vs;
+//                vhpt_special   *vs;
         };
        // Internal position information, buffer and storage etc. TBD
 } thash_cb_t;
 
 #define ITR(hcb,id)             ((hcb)->ts->itr[id])
 #define DTR(hcb,id)             ((hcb)->ts->dtr[id])
-#define INVALIDATE_HASH(hcb,hash)           {   \
-           if ((hcb)->ht==THASH_TLB)            \
-             INVALID_TLB(hash) = 1;             \
-           else                                 \
-             INVALID_VHPT(hash) = 1;            \
-           hash->next = NULL; }
-
+#define INVALIDATE_HASH_HEADER(hcb,hash)    INVALIDATE_TLB_HEADER(hash)
+/*              \
+{           if ((hcb)->ht==THASH_TLB){            \
+            INVALIDATE_TLB_HEADER(hash);             \
+           }else{                                 \
+             INVALIDATE_VHPT_HEADER(hash);            \
+            }                                       \
+}
+ */
 #define PURGABLE_ENTRY(hcb,en)  1
 //             ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
 
@@ -242,18 +287,20 @@
  *    NOTES:
  *      1: TLB entry may be TR, TC or Foreign Map. For TR entry,
  *         itr[]/dtr[] need to be updated too.
- *      2: Inserting to collision chain may trigger recycling if 
+ *      2: Inserting to collision chain may trigger recycling if
  *         the buffer for collision chain is empty.
  *      3: The new entry is inserted at the hash table.
  *         (I.e. head of the collision chain)
  *      4: Return the entry in hash table or collision chain.
  *
  */
-extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
+extern void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
+//extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
 extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
-
-/*
- * Force to delete a found entry no matter TR or foreign map for TLB. 
+extern thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl);
+extern u64 get_mfn(struct domain *d, u64 gpfn);
+/*
+ * Force deletion of a found entry, whether TR or foreign map, for the TLB.
  *    NOTES:
  *      1: TLB entry may be TR, TC or Foreign Map. For TR entry,
  *         itr[]/dtr[] need to be updated too.
@@ -307,7 +354,7 @@
                         u64 rid, u64 va, u64 sz, 
                         search_section_t p_sect, 
                         CACHE_LINE_TYPE cl);
-extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in);
+extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va);
 
 /*
  * Purge all TCs or VHPT entries including those in Hash table.
@@ -335,8 +382,10 @@
 extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
 extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
 extern thash_cb_t *init_domain_tlb(struct vcpu *d);
-
-#define   VTLB_DEBUG
+extern thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag);
+extern thash_data_t * vhpt_lookup(u64 va);
+
+//#define   VTLB_DEBUG
 #ifdef   VTLB_DEBUG
 extern void check_vtlb_sanity(thash_cb_t *vtlb);
 extern void dump_vtlb(thash_cb_t *vtlb);
diff -r e58ff5fd3550 -r cfe20f41f043 xen/include/asm-ia64/vmx_mm_def.h
--- a/xen/include/asm-ia64/vmx_mm_def.h Tue Feb 28 20:18:08 2006
+++ b/xen/include/asm-ia64/vmx_mm_def.h Wed Mar  1 15:29:00 2006
@@ -34,7 +34,7 @@
 #define POFFSET(vaddr, ps)  ((vaddr) & (PSIZE(ps) - 1))
 #define PPN_2_PA(ppn)       ((ppn)<<12)
 #define CLEARLSB(ppn, nbits)    ((((uint64_t)ppn) >> (nbits)) << (nbits))
-#define PAGEALIGN(va, ps)      (va & ~(PSIZE(ps)-1))
+#define PAGEALIGN(va, ps)      CLEARLSB(va, ps)
 
 #define TLB_AR_R        0
 #define TLB_AR_RX       1
@@ -104,6 +104,7 @@
 
 #define VRN_MASK        0xe000000000000000L
 #define PTA_BASE_MASK       0x3fffffffffffL
+#define PTA_BASE_SHIFT      15
 #define VHPT_OFFSET_MASK    0x7fff
 
 #define BITS_SHIFT_256MB    28
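
Routing PAGEALIGN() through CLEARLSB above fixes an operator-precedence hazard: the old body used va unparenthesized, so an argument such as base + off bound as base + (off & ~(PSIZE(ps)-1)). A small stand-alone check with illustrative values:

#include <assert.h>
#include <stdint.h>

#define PSIZE(ps)             (1UL << (ps))
#define CLEARLSB(ppn, nbits)  ((((uint64_t)(ppn)) >> (nbits)) << (nbits))
#define PAGEALIGN(va, ps)     CLEARLSB(va, ps)
#define OLD_PAGEALIGN(va, ps) (va & ~(PSIZE(ps)-1))   /* previous definition */

int main(void)
{
    /* 16KB pages (ps == 14): align 0x12345678 down to 0x12344000 */
    assert(PAGEALIGN(0x12345678UL, 14) == 0x12344000UL);

    /* the old macro mis-binds when the argument is a sum:
     * 0x10000100 + 0x5678 = 0x10005778 should align to 0x10004000 */
    assert(OLD_PAGEALIGN(0x10000100UL + 0x5678UL, 14) == 0x10004100UL);
    assert(PAGEALIGN(0x10000100UL + 0x5678UL, 14)     == 0x10004000UL);
    return 0;
}
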
diff -r e58ff5fd3550 -r cfe20f41f043 xen/include/asm-ia64/vmx_platform.h
--- a/xen/include/asm-ia64/vmx_platform.h       Tue Feb 28 20:18:08 2006
+++ b/xen/include/asm-ia64/vmx_platform.h       Wed Mar  1 15:29:00 2006
@@ -54,7 +54,7 @@
 #define VCPU(_v,_x)    _v->arch.privregs->_x
 #define VLAPIC_ID(l) (uint16_t)(VCPU((l)->vcpu, lid) >> 16)
 #define VLAPIC_IRR(l) VCPU((l)->vcpu, irr[0])
-
+struct vlapic* apic_round_robin(struct domain *d, uint8_t dest_mode, uint8_t vector, uint32_t bitmap);
 extern int vmx_vcpu_pend_interrupt(struct vcpu *vcpu, uint8_t vector);
 static inline int vlapic_set_irq(struct vlapic *t, uint8_t vec, uint8_t trig)
 {
diff -r e58ff5fd3550 -r cfe20f41f043 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Tue Feb 28 20:18:08 2006
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Wed Mar  1 15:29:00 2006
@@ -464,6 +464,7 @@
 
     rr.rrval=val;
     rr.rid = rr.rid + v->arch.starting_rid;
+    rr.ps = PAGE_SHIFT;
     rr.ve = 1;
     return  vmMangleRID(rr.rrval);
 /* Disable this rid allocation algorithm for now */
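
The one-line addition above pins the region's preferred page size to Xen's before the rid is mangled, so hardware walks and software inserts agree on page size. A reduced, hypothetical sketch of the fields involved (the real ia64_rr union lives in regionreg.h, and the real code then calls vmMangleRID()); PAGE_SHIFT == 14 is an assumption:

typedef union {
    unsigned long rrval;
    struct {                         /* ia64 region register layout */
        unsigned long ve  : 1;       /* bit 0: VHPT walker enable */
        unsigned long rv1 : 1;       /* bit 1: reserved */
        unsigned long ps  : 6;       /* bits 2-7: preferred page size (log2) */
        unsigned long rid : 24;      /* bits 8-31: region id */
    };
} ia64_rr;

/* Reduced version of the fixup: relocate the guest rid into the host's
 * rid space, then force ps/ve as the patch does. */
static unsigned long fixup_rr(unsigned long val, unsigned long starting_rid)
{
    ia64_rr rr;
    rr.rrval = val;
    rr.rid   = rr.rid + starting_rid;
    rr.ps    = 14;                   /* assumption: PAGE_SHIFT == 14 */
    rr.ve    = 1;
    return rr.rrval;
}
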
diff -r e58ff5fd3550 -r cfe20f41f043 xen/include/asm-ia64/xenkregs.h
--- a/xen/include/asm-ia64/xenkregs.h   Tue Feb 28 20:18:08 2006
+++ b/xen/include/asm-ia64/xenkregs.h   Wed Mar  1 15:29:00 2006
@@ -8,7 +8,8 @@
 #define        IA64_TR_VHPT            4       /* dtr4: vhpt */
 #define IA64_TR_ARCH_INFO      5
 #define IA64_TR_PERVP_VHPT     6
-
+#define IA64_DTR_GUEST_KERNEL   7
+#define IA64_ITR_GUEST_KERNEL   2
 /* Processor status register bits: */
 #define IA64_PSR_VM_BIT                46
 #define IA64_PSR_VM    (__IA64_UL(1) << IA64_PSR_VM_BIT)

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog