
Re: [Xen-ia64-devel] Re: [PATCH] Fix mca handler so as not to destroy ar



On Tue, Aug 05, 2008 at 12:37:17PM +0900, Isaku Yamahata wrote:
> 
> Hi Kazu. Sorry for the late alert.
> VIRTUAL_MODE_ENTER() still refers to ar.k6.
> Could you fix it?

One more thing.
Before pinning down the vpd and vhpt, it is necessary to
make sure that they don't overlap with the stack, as is done in
__vmxswitch_rr7() and ia64_new_rr7().
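
For reference, the check amounts to something like the following C sketch
(a rough illustration only; GRANULE_SHIFT and the helper names here are
stand-ins, and the real check is done in assembly, like the .overlap_vhpt
check in ia64_reload_tr):

    #include <stdint.h>

    #define GRANULE_SHIFT 24   /* stand-in for IA64_GRANULE_SHIFT */

    /* Two virtual addresses collide for TR purposes when they fall into
     * the same granule, since the stack TR maps a whole granule. */
    static int same_granule(uint64_t va1, uint64_t va2)
    {
        return (va1 >> GRANULE_SHIFT) == (va2 >> GRANULE_SHIFT);
    }

    /* Hypothetical pin helper: skip the TR insert when the target shares
     * a granule with the current stack, which is already covered by the
     * stack's own TR. */
    static void pin_unless_stack_overlap(uint64_t target_va, uint64_t stack_va)
    {
        if (same_granule(target_va, stack_va))
            return;   /* already mapped via the stack TR; don't re-insert */
        /* ... the itr.d/itr.i insert of target_va would go here ... */
    }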

thanks,

> 
> 
> On Wed, Jul 30, 2008 at 01:29:39PM +0900, SUZUKI Kazuhiro wrote:
> > Hi,
> > 
> > Thank you for your comments.
> > I have attached an updated patch that addresses the comments.
> > 
> > Thanks,
> > KAZ
> > 
> > Signed-off-by: Kazuhiro Suzuki <kaz@xxxxxxxxxxxxxx>
> > 
> > 
> > From: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
> > Subject: Re: [PATCH] Fix mca handler so as not to destroy ar(was: Re: 
> > [Xen-ia64-devel] Re: mca handler)
> > Date: Mon, 28 Jul 2008 11:20:05 +0900
> > 
> > > On Fri, Jul 25, 2008 at 05:47:37PM +0900, SUZUKI Kazuhiro wrote:
> > > > The following patch fixes the mca handler so as not to destroy ar
> > > > and some bugs.
> > > 
> > > Thank you for fixing these bugs; the patch looks basically good.
> > > Some comments below.
> > > 
> > > 
> > > > @@ -524,24 +457,111 @@ ia64_reload_tr:
> > > >         srlz.d
> > > >         ;;
> > > >  #ifdef XEN
> > > > -.reload_vhpt:
> > > > -       // 5. VHPT
> > > > -       GET_THIS_PADDR(r1, inserted_vhpt);;
> > > > -       cmp.eq p7,p0=r2,r0
> > > > -(p7)   br.cond.sptk    .overlap_vhpt   // vhpt isn't mapped.
> > > > +       // 5. shared_info
> > > > +       GET_THIS_PADDR(r2, inserted_shared_info);;
> > > > +       ld8 r16=[r2]
> > > > +       mov r18=XSI_SHIFT<<2
> > > > +       movl r20=__pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RW)
> > > > +       ;;
> > > > +       GET_THIS_PADDR(r2, domain_shared_info);;
> > > > +       ld8 r17=[r2]
> > > > +       ;;
> > > > +       dep r17=0,r17,60,4
> > > > +       ;; 
> > > > +       or r17=r17,r20                  // construct PA | page properties
> > > > +       mov cr.itir=r18
> > > > +       mov cr.ifa=r16
> > > > +       ;;
> > > > +       mov r16=IA64_TR_SHARED_INFO
> > > > +       ;;
> > > > +       itr.d dtr[r16]=r17              // wire in new mapping...
> > > > +       ;; 
> > > > +       srlz.d
> > > > +       ;; 
> > > 
> > > Unconditionally pinning down shared_info via inserted_shared_info
> > > is wrong, because shared_info is shared only between a PV domain and
> > > the xen VMM. So in the VMX domain case it should not be pinned down;
> > > otherwise the VMX guest could wrongly access shared_info.
> > > In the ia64_do_tlb_purge() case, unconditionally purging
> > > DTR[IA64_TR_SHARED_INFO] is okay, but unconditionally inserting
> > > DTR[IA64_TR_SHARED_INFO] is not.
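
To put the same point as a minimal C sketch (the names below are
illustrative stand-ins, not the real Xen symbols; the updated patch below
implements this with the conditional assembly in ia64_reload_tr):

    #include <stdint.h>

    /* Placeholder TR slot numbers; the real values come from the Xen
     * headers (IA64_TR_SHARED_INFO, IA64_TR_MAPPED_REGS, IA64_TR_VPD). */
    enum { TR_SHARED_INFO, TR_MAPPED_REGS, TR_VPD };

    /* Stub standing in for an itr.d/itr.i TR insert. */
    static void pin_tr(int slot, uint64_t va) { (void)slot; (void)va; }

    struct cpu_state {                 /* stands in for the per-CPU variables */
        int      vmx_domain;           /* VMX_DOMAIN(current)                 */
        uint64_t inserted_shared_info;
        uint64_t inserted_mapped_regs;
        uint64_t inserted_vpd;         /* 0 if no VPD has been mapped yet     */
    };

    static void reload_guest_trs(const struct cpu_state *s)
    {
        if (!s->vmx_domain) {
            /* PV guest: shared_info and mapped_regs are shared with Xen,
             * so they are re-pinned after the MCA TLB purge. */
            pin_tr(TR_SHARED_INFO, s->inserted_shared_info);
            pin_tr(TR_MAPPED_REGS, s->inserted_mapped_regs);
        } else if (s->inserted_vpd != 0) {
            /* VMX guest: only the VPD is re-pinned; shared_info must not
             * be, or the guest could reach it through a stale mapping. */
            pin_tr(TR_VPD, s->inserted_vpd);
        }
    }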
> > > 
> > > thanks,
> 
> > diff -r 1970781956c7 xen/arch/ia64/linux-xen/mca_asm.S
> > --- a/xen/arch/ia64/linux-xen/mca_asm.S     Wed Jul 23 12:10:20 2008 +0900
> > +++ b/xen/arch/ia64/linux-xen/mca_asm.S     Wed Jul 30 11:33:51 2008 +0900
> > @@ -154,71 +154,6 @@
> >     .text
> >     .align 16
> >  
> > -#ifdef     XEN
> > -/*
> > - * void set_per_cpu_data(void)
> > - * {
> > - *   int i;
> > - *   for (i = 0; i < 64; i++) {
> > - *     if (ia64_mca_tlb_list[i].cr_lid == ia64_getreg(_IA64_REG_CR_LID)) {
> > - *       ia64_set_kr(IA64_KR_PER_CPU_DATA, ia64_mca_tlb_list[i].percpu_paddr);
> > - *       return;
> > - *     }
> > - *   }
> > - *   while(1);     // Endless loop on error
> > - * }
> > - */
> > -#define SET_PER_CPU_DATA()                                 \
> > -   LOAD_PHYSICAL(p0,r2,ia64_mca_tlb_list);;                \
> > -   mov r7 = r0;                                            \
> > -   mov r6 = r0;;                                           \
> > -   adds r3 = IA64_MCA_PERCPU_OFFSET, r2;                   \
> > -1: add r4 = r6, r2;                                        \
> > -   mov r5=cr.lid;;                                         \
> > -   adds r7 = 1, r7;                                        \
> > -   ld8 r4 = [r4];;                                         \
> > -   cmp.ne p6, p7 = r5, r4;                                 \
> > -   cmp4.lt p8, p9 = NR_CPUS-1, r7;                         \
> > -(p7)       br.cond.dpnt 3f;                                        \
> > -   adds r6 = 16, r6;                                       \
> > -(p9)       br.cond.sptk 1b;                                        \
> > -2: br 2b;;                 /* Endless loop on error */     \
> > -3: add r4 = r6, r3;;                                       \
> > -   ld8 r4 = [r4];;                                         \
> > -   mov ar.k3=r4
> > -
> > -/*
> > - * GET_VA_VCPU_VHPT_MADDR() emulates 'reg = __va_ul(vcpu_vhpt_maddr(v))'.
> > - */
> > -#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
> > -#define HAS_PERVCPU_VHPT_MASK      0x2
> > -#define GET_VA_VCPU_VHPT_MADDR(reg,tmp)                            \
> > -   GET_THIS_PADDR(reg,cpu_kr);;                            \
> > -   add reg=IA64_KR_CURRENT_OFFSET,reg;;                    \
> > -   ld8 reg=[reg];;                                         \
> > -   dep tmp=0,reg,60,4;;                    /* V to P */    \
> > -   add tmp=IA64_VCPU_VHPT_PAGE_OFFSET,tmp;;                \
> > -   ld8 tmp=[tmp];;                                         \
> > -   cmp.eq p6,p0=tmp,r0;    /* v->arch.vhpt_page == NULL */ \
> > -(p6)       br.cond.sptk 1f;                                        \
> > -   add reg=IA64_VCPU_VHPT_MADDR_OFFSET,reg;;               \
> > -   dep reg=0,reg,60,4;;                    /* V to P */    \
> > -   ld8 reg=[reg];;                                         \
> > -   dep reg=-1,reg,60,4;                    /* P to V */    \
> > -   br.sptk 2f;                                             \
> > -1:                                                         \
> > -   GET_THIS_PADDR(reg, vhpt_paddr);;                       \
> > -   ld8 reg=[reg];;                                         \
> > -   dep reg=-1,reg,60,4;                    /* P to V */    \
> > -2:
> > -#else /* CONFIG_XEN_IA64_PERVCPU_VHPT */
> > -#define GET_VA_VCPU_VHPT_MADDR(reg,tmp)                            \
> > -   GET_THIS_PADDR(reg, vhpt_paddr);;                       \
> > -   ld8 reg=[reg];;                                         \
> > -   dep reg=-1,reg,60,4                     /* P to V */
> > -#endif /* CONFIG_XEN_IA64_PERVCPU_VHPT */
> > -#endif     /* XEN */
> > -
> >  /*
> >   * Just the TLB purge part is moved to a separate function
> >   * so we can re-use the code for cpu hotplug code as well
> > @@ -227,10 +162,6 @@ 2:
> >   */
> >  
> >  ia64_do_tlb_purge:
> > -#ifdef XEN
> > -   // This needs to be called in order for GET_THIS_PADDR to work
> > -   SET_PER_CPU_DATA();;
> > -#endif
> >  #define O(member)  IA64_CPUINFO_##member##_OFFSET
> >  
> >     GET_THIS_PADDR(r2, cpu_info)    // load phys addr of cpu_info into r2
> > @@ -346,15 +277,19 @@ 4:
> >     // a VMX domain hasn't been started since boot
> >     GET_THIS_PADDR(r2, inserted_vpd);;
> >     ld8 r16=[r2]
> > -   mov r18=XMAPPEDREGS_SHIFT<<2
> > -   ;;
> > -   cmp.eq p7,p0=r2,r0
> > +   mov r18=IA64_GRANULE_SHIFT<<2
> > +   ;;
> > +   cmp.eq p7,p0=r16,r0
> >     ;;
> >  (p7)       br.cond.sptk .vpd_not_mapped
> >     ;;
> >     ptr.i r16,r18
> >     ;;
> > +   ptr.d r16,r18
> > +   ;; 
> >     srlz.i
> > +   ;;
> > +   srlz.d
> >     ;;
> >  .vpd_not_mapped:
> >  
> > @@ -362,6 +297,7 @@ 4:
> >     // GET_VA_VCPU_VHPT_MADDR() may not give the
> >     // value of the VHPT currently pinned into the TLB
> >     GET_THIS_PADDR(r2, inserted_vhpt);;
> > +   ld8 r2=[r2]
> >     ;;
> >     cmp.eq p7,p0=r2,r0
> >     ;;
> > @@ -389,9 +325,6 @@ ia64_os_mca_spin:
> >     cmp.ne  p6,p0=r4,r0
> >  (p6)       br ia64_os_mca_spin
> >  
> > -#ifdef XEN
> > -   SET_PER_CPU_DATA();;
> > -#endif
> >     // Save the SAL to OS MCA handoff state as defined
> >     // by SAL SPEC 3.0
> >     // NOTE : The order in which the state gets saved
> > @@ -524,24 +457,129 @@ ia64_reload_tr:
> >     srlz.d
> >     ;;
> >  #ifdef XEN
> > -.reload_vhpt:
> > -   // 5. VHPT
> > -       GET_THIS_PADDR(r1, inserted_vhpt);;
> > -       cmp.eq p7,p0=r2,r0
> > -(p7)   br.cond.sptk    .overlap_vhpt   // vhpt isn't mapped.
> > +   // if !VMX_DOMAIN(current)
> > +   //      pin down shared_info and mapped_regs
> > +   // else
> > +   //      pin down VPD
> > +   GET_THIS_PADDR(r2,cpu_kr);;
> > +   add r2=IA64_KR_CURRENT_OFFSET,r2
> > +   ;;
> > +   ld8 r2=[r2]
> > +   ;;
> > +   dep r2=0,r2,60,4
> > +   ;;
> > +   add r2=IA64_VCPU_FLAGS_OFFSET,r2
> > +   ;;
> > +   ld8 r2=[r2]
> > +   ;;
> > +   cmp.eq p6,p7 = r2,r0
> > +(p7)       br.cond.sptk .vmx_domain
> > +
> > +   // 5. shared_info
> > +   GET_THIS_PADDR(r2, inserted_shared_info);;
> > +   ld8 r16=[r2]
> > +   mov r18=XSI_SHIFT<<2
> > +   movl r20=__pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RW)
> > +   ;;
> > +   GET_THIS_PADDR(r2, domain_shared_info);;
> > +   ld8 r17=[r2]
> > +   ;;
> > +   dep r17=0,r17,60,4
> > +   ;; 
> > +   or r17=r17,r20                  // construct PA | page properties
> > +   mov cr.itir=r18
> > +   mov cr.ifa=r16
> > +   ;;
> > +   mov r16=IA64_TR_SHARED_INFO
> > +   ;;
> > +   itr.d dtr[r16]=r17              // wire in new mapping...
> > +   ;; 
> > +   srlz.d
> > +   ;; 
> > +
> > +   // 6. mapped_regs
> > +   GET_THIS_PADDR(r2, inserted_mapped_regs);;
> > +   ld8 r16=[r2]
> > +   mov r18=XMAPPEDREGS_SHIFT<<2
> > +   ;; 
> > +   GET_THIS_PADDR(r2,cpu_kr);;
> > +   add r2=IA64_KR_CURRENT_OFFSET,r2
> > +   ;;
> > +   ld8 r2=[r2]
> > +   ;;
> > +   dep r2=0,r2,60,4
> > +   ;;
> > +   add r2=IA64_VPD_BASE_OFFSET,r2
> > +   ;;
> > +   ld8 r17=[r2]
> > +   ;;
> > +   dep r17=0,r17,60,4
> > +   ;;
> > +   or r17=r17,r20                  // construct PA | page properties
> > +   mov cr.itir=r18
> > +   mov cr.ifa=r16
> > +   ;;
> > +   mov r16=IA64_TR_MAPPED_REGS
> > +   ;;
> > +   itr.d dtr[r16]=r17              // wire in new mapping...
> > +   ;;
> > +   srlz.d
> > +   ;;
> > +   br.sptk.many .reload_vpd_not_mapped;;
> > +.vmx_domain:       
> > +
> > +   // 7. VPD
> > +   GET_THIS_PADDR(r2, inserted_vpd);;
> > +   ld8 r16=[r2]
> > +   mov r18=IA64_GRANULE_SHIFT<<2
> > +   ;;
> > +   cmp.eq p7,p0=r16,r0
> > +   ;;
> > +(p7)       br.cond.sptk .reload_vpd_not_mapped
> > +   dep r17=0,r16,60,4
> > +   ;;
> > +   dep r17=0,r17,0,IA64_GRANULE_SHIFT
> > +   ;;
> > +   movl r20=PAGE_KERNEL
> > +   ;;
> > +   or r17=r20,r17          // construct PA | page properties
> > +   ;;
> > +   mov cr.itir=r18
> > +   mov cr.ifa=r16
> > +   ;;
> > +   mov r16=IA64_TR_VPD
> > +   mov r18=IA64_TR_MAPPED_REGS
> > +   ;;
> > +   itr.i itr[r16]=r17
> > +   ;;
> > +   itr.d dtr[r18]=r17
> > +   ;;
> > +   srlz.i
> > +   ;;
> > +   srlz.d
> > +   ;;
> > +.reload_vpd_not_mapped:
> > +
> > +   // 8. VHPT
> > +   GET_THIS_PADDR(r2, inserted_vhpt);;
> > +   ld8 r2=[r2]
> > +   ;;
> > +   cmp.eq p7,p0=r2,r0
> > +   ;;
> > +(p7)       br.cond.sptk    .overlap_vhpt   // vhpt isn't mapped.
> >  
> >     // avoid overlapping with stack TR
> > -   shr.u r17=r2,IA64_GRANULE_SHIFT
> > -   GET_THIS_PADDR(r3, cpu_kr);;
> > -   add r3=IA64_KR_CURRENT_STACK_OFFSET,r3
> > -   ;;
> > -   ld8 r3=[r3]
> > -   ;;
> > -   cmp.eq p7,p0=r3,r17
> > +   dep r16=0,r2,0,IA64_GRANULE_SHIFT
> > +   ;;
> > +   GET_THIS_PADDR(r2,cpu_kr);;
> > +   add r2=IA64_KR_CURRENT_OFFSET,r2
> > +   ;;
> > +   ld8 r2=[r2]
> > +   ;;
> > +   dep r17=0,r2,0,IA64_GRANULE_SHIFT
> > +   ;; 
> > +   cmp.eq p7,p0=r16,r17
> >  (p7)       br.cond.sptk    .overlap_vhpt
> > -   ;;
> > -
> > -   dep r16=0,r2,0,IA64_GRANULE_SHIFT
> >     movl r20=PAGE_KERNEL
> >     ;;
> >     mov r18=IA64_TR_VHPT
> > @@ -1123,8 +1161,6 @@ GLOBAL_ENTRY(ia64_monarch_init_handler)
> >  GLOBAL_ENTRY(ia64_monarch_init_handler)
> >     .prologue
> >  #ifdef XEN /* Need in ia64_monarch_init_handler? */
> > -   SET_PER_CPU_DATA();;
> > -
> >     // Set current to ar.k6
> >     GET_THIS_PADDR(r2,cpu_kr);;
> >     add r2=IA64_KR_CURRENT_OFFSET,r2;;
> > diff -r 1970781956c7 xen/arch/ia64/xen/regionreg.c
> > --- a/xen/arch/ia64/xen/regionreg.c Wed Jul 23 12:10:20 2008 +0900
> > +++ b/xen/arch/ia64/xen/regionreg.c Tue Jul 29 17:43:24 2008 +0900
> > @@ -52,6 +52,7 @@ static unsigned int domain_rid_bits_defa
> >  static unsigned int domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
> >  integer_param("dom_rid_bits", domain_rid_bits_default); 
> >  
> > +DEFINE_PER_CPU(unsigned long, domain_shared_info);
> >  DEFINE_PER_CPU(unsigned long, inserted_vhpt);
> >  DEFINE_PER_CPU(unsigned long, inserted_shared_info);
> >  DEFINE_PER_CPU(unsigned long, inserted_mapped_regs);
> > @@ -270,6 +271,8 @@ int set_one_rr(unsigned long rr, unsigne
> >             if (!PSCB(v,metaphysical_mode))
> >                     set_rr(rr,newrrv.rrval);
> >     } else if (rreg == 7) {
> > +           __get_cpu_var(domain_shared_info) =
> > +                                   (unsigned long)v->domain->shared_info;
> >  #if VHPT_ENABLED
> >             __get_cpu_var(inserted_vhpt) = __va_ul(vcpu_vhpt_maddr(v));
> >  #endif
> > diff -r 1970781956c7 xen/include/asm-ia64/linux-xen/asm/mca_asm.h
> > --- a/xen/include/asm-ia64/linux-xen/asm/mca_asm.h  Wed Jul 23 12:10:20 2008 +0900
> > +++ b/xen/include/asm-ia64/linux-xen/asm/mca_asm.h  Mon Jul 28 11:42:09 2008 +0900
> > @@ -58,8 +58,35 @@
> >  #endif
> >  
> >  #ifdef XEN
> > +/*
> > + * void set_per_cpu_data(*ret)
> > + * {
> > + *   int i;
> > + *   for (i = 0; i < 64; i++) {
> > + *     if (ia64_mca_tlb_list[i].cr_lid == ia64_getreg(_IA64_REG_CR_LID)) {
> > + *       *ret = ia64_mca_tlb_list[i].percpu_paddr;
> > + *       return;
> > + *     }
> > + *   }
> > + *   while(1);     // Endless loop on error
> > + * }
> > + */
> > +#define SET_PER_CPU_DATA(reg,_tmp1,_tmp2,_tmp3)    \
> > +   LOAD_PHYSICAL(p0,reg,ia64_mca_tlb_list);;\
> > +   mov _tmp1 = ar.lc;;                     \
> > +   mov ar.lc = NR_CPUS-1;                  \
> > +   mov _tmp2 = cr.lid;;                    \
> > +10:        ld8 _tmp3 = [reg],16;;                  \
> > +   cmp.ne p6, p7 = _tmp3, _tmp2;;          \
> > +(p7)       br.cond.dpnt 30f;;                      \
> > +   br.cloop.sptk.few 10b;;                 \
> > +20:        br 20b;;/* Endless loop on error */     \
> > +30:        mov ar.lc = _tmp1;                      \
> > +   adds reg = IA64_MCA_PERCPU_OFFSET-IA64_MCA_TLB_INFO_SIZE, reg;; \
> > +   ld8 reg = [reg]
> > +
> >  #define GET_THIS_PADDR(reg, var)           \
> > -   mov     reg = IA64_KR(PER_CPU_DATA);;   \
> > +   SET_PER_CPU_DATA(reg,r5,r6,r7);;        \
> >     addl    reg = THIS_CPU(var) - PERCPU_ADDR, reg
> >  #else
> >  #define GET_THIS_PADDR(reg, var)           \
> > diff -r 1970781956c7 xen/include/asm-ia64/regionreg.h
> > --- a/xen/include/asm-ia64/regionreg.h      Wed Jul 23 12:10:20 2008 +0900
> > +++ b/xen/include/asm-ia64/regionreg.h      Fri Jul 25 10:36:14 2008 +0900
> > @@ -36,6 +36,7 @@ typedef union ia64_rr {
> >  #define RR_RID(arg)     (((arg) & 0x0000000000ffffff) << 8)
> >  #define RR_RID_MASK     0x00000000ffffff00L
> >  
> > +DECLARE_PER_CPU(unsigned long, domain_shared_info);
> >  DECLARE_PER_CPU(unsigned long, inserted_vhpt);
> >  DECLARE_PER_CPU(unsigned long, inserted_shared_info);
> >  DECLARE_PER_CPU(unsigned long, inserted_mapped_regs);
> 
> > _______________________________________________
> > Xen-ia64-devel mailing list
> > Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
> > http://lists.xensource.com/xen-ia64-devel
> 

-- 
yamahata

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel


 

