
RE: [Xen-ia64-devel] RE: SMP patch


  • To: "Tristan Gingold" <Tristan.Gingold@xxxxxxxx>
  • From: "Magenheimer, Dan (HP Labs Fort Collins)" <dan.magenheimer@xxxxxx>
  • Date: Wed, 12 Oct 2005 15:06:02 -0700
  • Cc: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
  • Delivery-date: Wed, 12 Oct 2005 22:03:14 +0000
  • List-id: Discussion of the ia64 port of Xen <xen-ia64-devel.lists.xensource.com>
  • Thread-index: AcXPQW8dapmppMutSwaJf1tN5aQvFwAFTHLwAAiNNXA=
  • Thread-topic: [Xen-ia64-devel] RE: SMP patch

> 2) Is there a reason you reduced the VHPT size 4x?  It
>    appears to have slowed performance down noticeably
>    (about 1%).

I can confirm that simply restoring the VHPT size to its
previous value brings performance back up.  The actual
slowdown (and recovery) on my "compiling Linux" benchmark
is closer to 1.25%.

My testing of the SMP code with CONFIG_XEN_SMP off has
passed (dom0 only), so I will check it in... without
the VHPT change.

Dan

> > -----Original Message-----
> > From: Tristan Gingold [mailto:Tristan.Gingold@xxxxxxxx] 
> > Sent: Wednesday, October 12, 2005 10:30 AM
> > To: Magenheimer, Dan (HP Labs Fort Collins)
> > Cc: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
> > Subject: SMP patch
> > 
> > Here is the SMP patch.
> > I was able to boot with #undef CONFIG_XEN_SMP.
> > 
> > Note: with SMP, Xen can freeze during boot.
> > I will work on this issue ASAP, but I won't have the machine tomorrow.
> > 
> > Integrate it or not, as you see fit.
> > 
> > Tristan.
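
For reference, the build-time switch Tristan toggles lives in
xen/include/asm-ia64/config.h; the relevant hunk is near the end of the
patch.  An abridged sketch of what it looks like with this patch applied
(the UP branch is shown only as far as the hunk context goes):

    /* abridged from xen/include/asm-ia64/config.h after this patch */
    #ifdef CONFIG_XEN_SMP
    #define CONFIG_SMP 1
    #define NR_CPUS 8              /* raised from 2 by this patch */
    #define CONFIG_NR_CPUS 8
    #else
    #undef CONFIG_SMP
    #define NR_CPUS 1
    #endif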
> > 
> > # HG changeset patch
> > # User tristan.gingold@xxxxxxxx
> > # Node ID b4d53809ce796e2081fe7968abf41b610db8409f
> > # Parent  d962821aa0e7c024bf7f4a978504ef7f028703d2
> > SMP patch.
> > 
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/linux-xen/head.S
> > --- a/xen/arch/ia64/linux-xen/head.S        Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/linux-xen/head.S        Wed Oct 12 14:12:44 2005
> > @@ -324,6 +324,9 @@
> >     mov r16=-1
> >  (isBP)     br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
> >  
> > +#ifndef XEN
> > +   // XEN: stack is allocated in xenheap, which is currently always
> > +   //  mapped.
> >     // load mapping for stack (virtaddr in r2, physaddr in r3)
> >     rsm psr.ic
> >     movl r17=PAGE_KERNEL
> > @@ -353,7 +356,8 @@
> >     ssm psr.ic
> >     srlz.d
> >     ;;
> > -
> > +#endif
> > +   
> >  .load_current:
> >     // load the "current" pointer (r13) and ar.k6 with the current task
> >  #if defined(XEN) && defined(VALIDATE_VT)
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/linux-xen/irq_ia64.c
> > --- a/xen/arch/ia64/linux-xen/irq_ia64.c    Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/linux-xen/irq_ia64.c    Wed Oct 12 14:12:44 2005
> > @@ -281,5 +281,8 @@
> >     ipi_data = (delivery_mode << 8) | (vector & 0xff);
> >     ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
> >  
> > +#ifdef XEN
> > +   printf ("send_ipi to %d (%x)\n", cpu, phys_cpu_id);
> > +#endif
> >     writeq(ipi_data, ipi_addr);
> >  }
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/linux-xen/mm_contig.c
> > --- a/xen/arch/ia64/linux-xen/mm_contig.c   Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/linux-xen/mm_contig.c   Wed Oct 12 14:12:44 2005
> > @@ -193,8 +193,8 @@
> >      */
> >     if (smp_processor_id() == 0) {
> >  #ifdef XEN
> > -           cpu_data = alloc_xenheap_pages(PERCPU_PAGE_SHIFT -
> > -                   PAGE_SHIFT + get_order(NR_CPUS));
> > +           cpu_data = alloc_xenheap_pages(get_order(NR_CPUS
> > +                                                    * PERCPU_PAGE_SIZE));
> >  #else
> >             cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
> >                                        PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
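
A note on the mm_contig.c hunk above: alloc_xenheap_pages() takes an
allocation order, and get_order() (as in Linux) works on a byte count, so
get_order(NR_CPUS) is 0 for any realistic NR_CPUS.  The old expression
therefore sized the block for a single per-CPU area no matter how many CPUs
were configured; the new form asks for NR_CPUS * PERCPU_PAGE_SIZE bytes
directly.  A sketch of the arithmetic (the PAGE_SHIFT/PERCPU_PAGE_SHIFT
values below are illustrative assumptions, not taken from the patch):

    /* sketch: old vs. new order computation, assuming 16 KB pages and a
     * 64 KB per-CPU area, with NR_CPUS = 8                                 */
    /*   old: PERCPU_PAGE_SHIFT - PAGE_SHIFT + get_order(NR_CPUS)
     *      = (16 - 14) + get_order(8 bytes) = 2 + 0
     *      -> 4 pages = 64 KB, i.e. one CPU's worth                        */
    /*   new: get_order(NR_CPUS * PERCPU_PAGE_SIZE)
     *      = get_order(8 * 64 KB) = 5
     *      -> 32 pages = 512 KB, i.e. NR_CPUS areas                        */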
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/linux-xen/setup.c
> > --- a/xen/arch/ia64/linux-xen/setup.c       Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/linux-xen/setup.c       Wed Oct 12 14:12:44 2005
> > @@ -366,6 +366,7 @@
> >  }
> >  #endif
> >  
> > +void __init
> >  #ifdef XEN
> >  early_setup_arch (char **cmdline_p)
> >  #else
> > @@ -377,14 +378,12 @@
> >     ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
> >  
> >     *cmdline_p = __va(ia64_boot_param->command_line);
> > -#ifdef XEN
> > -   efi_init();
> > -#else
> > +#ifndef XEN
> >     strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
> > +#endif
> >  
> >     efi_init();
> >     io_port_init();
> > -#endif
> >  
> >  #ifdef CONFIG_IA64_GENERIC
> >     {
> > @@ -414,11 +413,17 @@
> >  #ifdef XEN
> >     early_cmdline_parse(cmdline_p);
> >     cmdline_parse(*cmdline_p);
> > -#undef CONFIG_ACPI_BOOT
> >  #endif
> >     if (early_console_setup(*cmdline_p) == 0)
> >             mark_bsp_online();
> >  
> > +#ifdef XEN
> > +}
> > +
> > +void __init
> > +late_setup_arch (char **cmdline_p)
> > +{
> > +#endif
> >  #ifdef CONFIG_ACPI_BOOT
> >     /* Initialize the ACPI boot-time table parser */
> >     acpi_table_init();
> > @@ -433,20 +438,16 @@
> >  
> >  #ifndef XEN
> >     find_memory();
> > -#else
> > -   io_port_init();
> > -}
> > -
> > -void __init
> > -late_setup_arch (char **cmdline_p)
> > -{
> > -#undef CONFIG_ACPI_BOOT
> > -   acpi_table_init();
> > -#endif
> > +#endif
> > +
> >     /* process SAL system table: */
> >     ia64_sal_init(efi.sal_systab);
> >  
> >  #ifdef CONFIG_SMP
> > +#ifdef XEN
> > +   init_smp_config ();
> > +#endif
> > +
> >     cpu_physical_id(0) = hard_smp_processor_id();
> >  
> >     cpu_set(0, cpu_sibling_map[0]);
> > @@ -768,6 +769,11 @@
> >  
> >     cpu_data = per_cpu_init();
> >  
> > +#ifdef XEN
> > +   printf ("cpu_init: current=%p, current->domain->arch.mm=%p\n",
> > +           current, current->domain->arch.mm);
> > +#endif
> > +
> >     /*
> >      * We set ar.k3 so that assembly code in MCA handler can compute
> >      * physical addresses of per cpu variables with a simple:
> > @@ -887,6 +893,16 @@
> >  #ifndef XEN
> >     pm_idle = default_idle;
> >  #endif
> > +
> > +#ifdef XEN
> > +    /* surrender usage of kernel registers to domain, use percpu area instead */
> > +    __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
> > +    __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
> > +    __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
> > +    __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
> > +    __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
> > +    __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
> > +#endif
> >  }
> >  
> >  void
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/linux-xen/smp.c
> > --- a/xen/arch/ia64/linux-xen/smp.c Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/linux-xen/smp.c Wed Oct 12 14:12:44 2005
> > @@ -63,9 +63,18 @@
> >  //Huh? This seems to be used on ia64 even if !CONFIG_SMP
> >  void smp_send_event_check_mask(cpumask_t mask)
> >  {
> > -   printf("smp_send_event_check_mask called\n");
> > -   //dummy();
> > -   //send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
> > +    int cpu;
> > +
> > +    /*  Not for me.  */
> > +    cpu_clear(smp_processor_id(), mask);
> > +    if (cpus_empty(mask))
> > +        return;
> > +
> > +    printf("smp_send_event_check_mask called\n");
> > +
> > +    for (cpu = 0; cpu < NR_CPUS; ++cpu)
> > +        if (cpu_isset(cpu, mask))
> > +       platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
> >  }
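
The rewritten smp_send_event_check_mask() above now does real work: it drops
the calling CPU from the mask and sends IA64_IPI_RESCHEDULE to every CPU
that remains.  A sketch of how a caller might use it (the helper below is
hypothetical, not part of this patch):

    /* hypothetical caller: kick CPU 'target' so it notices a pending event */
    static void kick_cpu(int target)
    {
        cpumask_t mask = CPU_MASK_NONE;

        cpu_set(target, mask);
        smp_send_event_check_mask(mask);  /* silently does nothing if 'target'
                                             is the current CPU */
    }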
> >  
> >  
> > @@ -249,6 +258,7 @@
> >     send_IPI_single(smp_processor_id(), op);
> >  }
> >  
> > +#ifndef XEN
> >  /*
> >   * Called with preeemption disabled.
> >   */
> > @@ -257,6 +267,7 @@
> >  {
> >     platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
> >  }
> > +#endif
> >  
> >  void
> >  smp_flush_tlb_all (void)
> > @@ -395,15 +406,14 @@
> >     if (wait)
> >             atomic_set(&data.finished, 0);
> >  
> > -   printk("smp_call_function: about to spin_lock \n");
> >     spin_lock(&call_lock);
> > -   printk("smp_call_function: done with spin_lock \n");
> > +#if 0 //def XEN
> > +   printk("smp_call_function: %d lock\n", smp_processor_id ());
> > +#endif
> >  
> >     call_data = &data;
> >     mb();   /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
> > -   printk("smp_call_function: about to send_IPI \n");
> >     send_IPI_allbutself(IPI_CALL_FUNC);
> > -   printk("smp_call_function: done with send_IPI \n");
> >  
> >     /* Wait for response */
> >     while (atomic_read(&data.started) != cpus)
> > @@ -414,9 +424,10 @@
> >                     cpu_relax();
> >     call_data = NULL;
> >  
> > -   printk("smp_call_function: about to spin_unlock \n");
> >     spin_unlock(&call_lock);
> > +#if 0 //def XEN
> >     printk("smp_call_function: DONE WITH spin_unlock, returning \n");
> > +#endif
> >     return 0;
> >  }
> >  EXPORT_SYMBOL(smp_call_function);
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/linux-xen/smpboot.c
> > --- a/xen/arch/ia64/linux-xen/smpboot.c     Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/linux-xen/smpboot.c     Wed Oct 12 14:12:44 2005
> > @@ -477,6 +477,22 @@
> >  
> >  do_rest:
> >     task_for_booting_cpu = c_idle.idle;
> > +#else
> > +   struct domain *idle;
> > +   struct vcpu *v;
> > +   void *stack;
> > +
> > +   if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
> > +           panic("failed 'createdomain' for CPU %d", cpu);
> > +   set_bit(_DOMF_idle_domain, &idle->domain_flags);
> > +   v = idle->vcpu[0];
> > +
> > +   printf ("do_boot_cpu: cpu=%d, domain=%p, vcpu=%p\n", cpu, idle, v);
> > +
> > +   task_for_booting_cpu = v;
> > +
> > +   /* Set cpu number.  */
> > +   get_thread_info(v)->cpu = cpu;
> >  #endif
> >  
> >     Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
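
One subtle dependency in the do_boot_cpu() hunk above: get_thread_info(v)->cpu
has to be set before the AP starts running, because later hunks in this patch
make smp_processor_id() read exactly that field.  The two definitions involved,
collected here from the config.h hunks below for convenience:

    /* from xen/include/asm-ia64/config.h as modified by this patch */
    #define get_thread_info(v)      alloc_thread_info(v)
    #ifdef CONFIG_SMP
    #define __smp_processor_id()    current_thread_info()->cpu
    #endif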
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/xen/acpi.c
> > --- a/xen/arch/ia64/xen/acpi.c      Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/xen/acpi.c      Wed Oct 12 14:12:44 2005
> > @@ -121,6 +121,7 @@
> >  #ifdef CONFIG_ACPI_BOOT
> >  
> >  #define ACPI_MAX_PLATFORM_INTERRUPTS       256
> > +#define NR_IOSAPICS 4
> >  
> >  #if 0
> >  /* Array to record platform interrupt vectors for generic interrupt routing. */
> > @@ -162,7 +163,6 @@
> >  struct acpi_table_madt *   acpi_madt __initdata;
> >  static u8                  has_8259;
> >  
> > -#if 0
> >  static int __init
> >  acpi_parse_lapic_addr_ovr (
> >     acpi_table_entry_header *header, const unsigned long end)
> > @@ -247,11 +247,12 @@
> >  
> >     acpi_table_print_madt_entry(header);
> >  
> > +#if 0
> >     iosapic_init(iosapic->address, iosapic->global_irq_base);
> > -
> > -   return 0;
> > -}
> > -
> > +#endif
> > +
> > +   return 0;
> > +}
> >  
> >  static int __init
> >  acpi_parse_plat_int_src (
> > @@ -267,6 +268,7 @@
> >  
> >     acpi_table_print_madt_entry(header);
> >  
> > +#if 0
> >     /*
> >      * Get vector assignment for this interrupt, set attributes,
> >      * and program the IOSAPIC routing table.
> > @@ -280,6 +282,7 @@
> >                                             (plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
> >  
> >     platform_intr_list[plintsrc->type] = vector;
> > +#endif
> >     return 0;
> >  }
> >  
> > @@ -297,12 +300,13 @@
> >  
> >     acpi_table_print_madt_entry(header);
> >  
> > +#if 0
> >     iosapic_override_isa_irq(p->bus_irq, p->global_irq,
> >                              (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
> >                              (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
> > -   return 0;
> > -}
> > -
> > +#endif
> > +   return 0;
> > +}
> >  
> >  static int __init
> >  acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
> > @@ -331,8 +335,10 @@
> >              */
> >             sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
> >  
> > +#if 0
> >             /*Start cyclone clock*/
> >             cyclone_setup(0);
> > +#endif
> >     }
> >  }
> >  
> > @@ -350,7 +356,9 @@
> >  #else
> >     has_8259 = acpi_madt->flags.pcat_compat;
> >  #endif
> > +#if 0
> >     iosapic_system_init(has_8259);
> > +#endif
> >  
> >     /* Get base address of IPI Message Block */
> >  
> > @@ -364,7 +372,6 @@
> >  
> >     return 0;
> >  }
> > -#endif
> >  
> >  #ifdef CONFIG_ACPI_NUMA
> >  
> > @@ -529,6 +536,7 @@
> >     return acpi_register_irq(gsi, polarity, trigger);
> >  }
> >  EXPORT_SYMBOL(acpi_register_gsi);
> > +#endif
> >  static int __init
> >  acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
> >  {
> > @@ -550,10 +558,11 @@
> >     if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
> >             acpi_legacy_devices = 1;
> >  
> > +#if 0
> >     acpi_register_gsi(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
> > -   return 0;
> > -}
> > -#endif
> > +#endif
> > +   return 0;
> > +}
> >  
> >  unsigned long __init
> >  acpi_find_rsdp (void)
> > @@ -567,7 +576,6 @@
> >     return rsdp_phys;
> >  }
> >  
> > -#if 0
> >  int __init
> >  acpi_boot_init (void)
> >  {
> > @@ -646,6 +654,7 @@
> >     printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
> >     return 0;
> >  }
> > +#if 0
> >  int
> >  acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
> >  {
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/xen/domain.c
> > --- a/xen/arch/ia64/xen/domain.c    Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/xen/domain.c    Wed Oct 12 14:12:44 2005
> > @@ -23,6 +23,7 @@
> >  #include <asm/io.h>
> >  #include <asm/processor.h>
> >  #include <asm/desc.h>
> > +#include <asm/hw_irq.h>
> >  //#include <asm/mpspec.h>
> >  #include <xen/irq.h>
> >  #include <xen/event.h>
> > @@ -75,35 +76,21 @@
> >     //free_page((unsigned long)d->mm.perdomain_pt);
> >  }
> >  
> > -int hlt_counter;
> > -
> > -void disable_hlt(void)
> > -{
> > -   hlt_counter++;
> > -}
> > -
> > -void enable_hlt(void)
> > -{
> > -   hlt_counter--;
> > -}
> > -
> >  static void default_idle(void)
> >  {
> > -   if ( hlt_counter == 0 )
> > -   {
> > +   int cpu = smp_processor_id();
> >     local_irq_disable();
> > -       if ( !softirq_pending(smp_processor_id()) )
> > +   if ( !softirq_pending(cpu))
> >             safe_halt();
> > -       //else
> > -           local_irq_enable();
> > -   }
> > -}
> > -
> > -void continue_cpu_idle_loop(void)
> > +   local_irq_enable();
> > +}
> > +
> > +static void continue_cpu_idle_loop(void)
> >  {
> >     int cpu = smp_processor_id();
> >     for ( ; ; )
> >     {
> > +   printf ("idle%dD\n", cpu);
> >  #ifdef IA64
> >  //        __IRQ_STAT(cpu, idle_timestamp) = jiffies
> >  #else
> > @@ -111,23 +98,32 @@
> >  #endif
> >         while ( !softirq_pending(cpu) )
> >             default_idle();
> > +       add_preempt_count(SOFTIRQ_OFFSET);
> >         raise_softirq(SCHEDULE_SOFTIRQ);
> >         do_softirq();
> > +       sub_preempt_count(SOFTIRQ_OFFSET);
> >     }
> >  }
> >  
> >  void startup_cpu_idle_loop(void)
> >  {
> > +   int cpu = smp_processor_id ();
> >     /* Just some sanity to ensure that the scheduler is set up okay. */
> >     ASSERT(current->domain == IDLE_DOMAIN_ID);
> > +   printf ("idle%dA\n", cpu);
> >     raise_softirq(SCHEDULE_SOFTIRQ);
> > +#if 0   /* All this work is done within continue_cpu_idle_loop  */
> > +   printf ("idle%dB\n", cpu);
> > +   asm volatile ("mov ar.k2=r0");
> >     do_softirq();
> > +   printf ("idle%dC\n", cpu);
> >  
> >     /*
> >      * Declares CPU setup done to the boot processor.
> >      * Therefore memory barrier to ensure state is visible.
> >      */
> >     smp_mb();
> > +#endif
> >  #if 0
> >  //do we have to ensure the idle task has a shared page so that, for example,
> >  //region registers can be loaded from it.  Apparently not...
> > @@ -229,17 +225,21 @@
> >     v->arch.breakimm = d->arch.breakimm;
> >  
> >     d->arch.sys_pgnr = 0;
> > -   d->arch.mm = xmalloc(struct mm_struct);
> > -   if (unlikely(!d->arch.mm)) {
> > -           printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
> > -           return -ENOMEM;
> > -   }
> > -   memset(d->arch.mm, 0, sizeof(*d->arch.mm));
> > -   d->arch.mm->pgd = pgd_alloc(d->arch.mm);
> > -   if (unlikely(!d->arch.mm->pgd)) {
> > -           printk("Can't allocate pgd for domain %d\n",d->domain_id);
> > -           return -ENOMEM;
> > -   }
> > +   if (d->domain_id != IDLE_DOMAIN_ID) {
> > +           d->arch.mm = xmalloc(struct mm_struct);
> > +           if (unlikely(!d->arch.mm)) {
> > +                   printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
> > +                   return -ENOMEM;
> > +           }
> > +           memset(d->arch.mm, 0, sizeof(*d->arch.mm));
> > +           d->arch.mm->pgd = pgd_alloc(d->arch.mm);
> > +           if (unlikely(!d->arch.mm->pgd)) {
> > +                   printk("Can't allocate pgd for domain %d\n",d->domain_id);
> > +                   return -ENOMEM;
> > +           }
> > +   } else
> > +           d->arch.mm = NULL;
> > +   printf ("arch_do_create_domain: domain=%p\n", d);
> >  }
> >  
> >  void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/xen/process.c
> > --- a/xen/arch/ia64/xen/process.c   Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/xen/process.c   Wed Oct 12 14:12:44 2005
> > @@ -62,11 +62,23 @@
> >     return 0;
> >  }
> >  
> > +#include <xen/sched-if.h>
> > +
> > +extern struct schedule_data schedule_data[NR_CPUS];
> > +
> >  void schedule_tail(struct vcpu *next)
> >  {
> >     unsigned long rr7;
> >     
> >     //printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
> >     //printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
> > +
> > +    // TG: Real HACK FIXME.
> > +    // This is currently necessary because when a new domain is started,
> > +    // the context_switch function of xen/common/schedule.c(__enter_scheduler)
> > +    // never returns.  Therefore, the lock must be released.
> > +    // schedule_tail is only called when a domain is started.
> > +    spin_unlock_irq(&schedule_data[current->processor].schedule_lock);
> > +
> >     /* rr7 will be postponed to last point when resuming back to guest */
> >      if(VMX_DOMAIN(current)){
> >             vmx_load_all_rr(current);
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/xen/xenirq.c
> > --- a/xen/arch/ia64/xen/xenirq.c    Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/xen/xenirq.c    Wed Oct 12 14:12:44 2005
> > @@ -35,7 +35,7 @@
> >  int
> >  xen_do_IRQ(ia64_vector vector)
> >  {
> > -   if (vector != 0xef) {
> > +   if (vector != IA64_TIMER_VECTOR && vector != IA64_IPI_VECTOR) {
> >             extern void vcpu_pend_interrupt(void *, int);
> >  #if 0
> >             if (firsttime[vector]) {
> > @@ -57,22 +57,18 @@
> >     return(0);
> >  }
> >  
> > -/* From linux/kernel/softirq.c */
> > -#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
> > -# define invoke_softirq()  __do_softirq()
> > -#else
> > -# define invoke_softirq()  do_softirq()
> > -#endif
> > -
> >  /*
> >   * Exit an interrupt context. Process softirqs if needed and possible:
> >   */
> >  void irq_exit(void)
> >  {
> >     //account_system_vtime(current);
> > -   //sub_preempt_count(IRQ_EXIT_OFFSET);
> > -   if (!in_interrupt() && local_softirq_pending())
> > -           invoke_softirq();
> > +   sub_preempt_count(IRQ_EXIT_OFFSET);
> > +   if (!in_interrupt() && local_softirq_pending()) {
> > +           add_preempt_count(SOFTIRQ_OFFSET);
> > +           do_softirq();
> > +           sub_preempt_count(SOFTIRQ_OFFSET);
> > +   }
> >     //preempt_enable_no_resched();
> >  }
> >  /* end from linux/kernel/softirq.c */
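
The irq_exit() change above goes together with the hardirq.h hunk below:
in_interrupt() becomes the real irq_count() test instead of a hard-coded 0,
so softirq processing must bracket itself with SOFTIRQ_OFFSET, just as the
idle loop in the domain.c hunk above now does around do_softirq().  A minimal
sketch of the preempt_count convention this relies on (standard Linux values,
assumed unchanged here):

    /* assumed preempt_count layout (standard Linux):               */
    /*   bits  0- 7  preemption nesting   (PREEMPT_OFFSET 0x000001) */
    /*   bits  8-15  softirq nesting      (SOFTIRQ_OFFSET 0x000100) */
    /*   bits 16-..  hardirq nesting      (HARDIRQ_OFFSET 0x010000) */
    /* in_interrupt() is true whenever any hardirq or softirq bits are set,
     * so adding SOFTIRQ_OFFSET before do_softirq() means a nested irq_exit()
     * sees in_interrupt() and will not start another round of softirqs. */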
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/xen/xenmisc.c
> > --- a/xen/arch/ia64/xen/xenmisc.c   Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/xen/xenmisc.c   Wed Oct 12 14:12:44 2005
> > @@ -280,6 +280,8 @@
> >  
> >  unsigned long context_switch_count = 0;
> >  
> > +#include <asm/vcpu.h>
> > +
> >  void context_switch(struct vcpu *prev, struct vcpu *next)
> >  {
> >  //printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
> > @@ -287,7 +289,8 @@
> >  
> > //prev->domain->domain_id,(long)prev&0xffffff,next->domain->domain_id,(long)next&0xffffff);
> > //if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo();
> > //if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo();
> > -//printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id);
> > +printk("@@sw%d/%x %d->%d\n",smp_processor_id(), hard_smp_processor_id (),
> > +       prev->domain->domain_id,next->domain->domain_id);
> >      if(VMX_DOMAIN(prev)){
> >             vtm_domain_out(prev);
> >      }
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/xen/xensetup.c
> > --- a/xen/arch/ia64/xen/xensetup.c  Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/xen/xensetup.c  Wed Oct 12 14:12:44 2005
> > @@ -253,11 +253,11 @@
> >  printk("About to call scheduler_init()\n");
> >      scheduler_init();
> >      local_irq_disable();
> > +    init_IRQ ();
> >  printk("About to call init_xen_time()\n");
> >      init_xen_time(); /* initialise the time */
> >  printk("About to call ac_timer_init()\n");
> >      ac_timer_init();
> > -// init_xen_time(); ???
> >  
> >  #ifdef CONFIG_SMP
> >      if ( opt_nosmp )
> > @@ -275,6 +275,9 @@
> >          cpu_set(i, cpu_present_map);
> >  
> >      //BUG_ON(!local_irq_is_enabled());
> > +
> > +    /*  Enable IRQ to receive IPI (needed for ITC sync).  */
> > +    local_irq_enable();
> >  
> >  printk("num_online_cpus=%d, 
> > max_cpus=%d\n",num_online_cpus(),max_cpus);
> >      for_each_present_cpu ( i )
> > @@ -287,24 +290,16 @@
> >     }
> >      }
> >  
> > +    local_irq_disable();
> > +
> >      printk("Brought up %ld CPUs\n", (long)num_online_cpus());
> >      smp_cpus_done(max_cpus);
> >  #endif
> >  
> > -
> > -   // FIXME: Should the following be swapped and moved later?
> > -    schedulers_start();
> >      do_initcalls();
> >  printk("About to call sort_main_extable()\n");
> >      sort_main_extable();
> >  
> > -    /* surrender usage of kernel registers to domain, use percpu area instead */
> > -    __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
> > -    __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
> > -    __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
> > -    __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
> > -    __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
> > -    __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
> >  
> >      /* Create initial domain 0. */
> >  printk("About to call do_createdomain()\n");
> > @@ -342,6 +337,11 @@
> >                          0,
> >                     0) != 0)
> >          panic("Could not set up DOM0 guest OS\n");
> > +
> > +    /* PIN domain0 on CPU 0.  */
> > +    dom0->vcpu[0]->cpumap=1;
> > +    set_bit(_VCPUF_cpu_pinned, &dom0->vcpu[0]->vcpu_flags);
> > +
> >  #ifdef CLONE_DOMAIN0
> >      {
> >      int i;
> > @@ -379,9 +379,16 @@
> >     domain_unpause_by_systemcontroller(clones[i]);
> >      }
> >  #endif
> > +    domain0_ready = 1;
> > +
> > +    local_irq_enable();
> > +
> > +    printf("About to call schedulers_start dom0=%p, idle0_dom=%p\n",
> > +      dom0, &idle0_domain);
> > +    schedulers_start();
> > +
> >      domain_unpause_by_systemcontroller(dom0);
> > -    domain0_ready = 1;
> > -    local_irq_enable();
> > +
> >  printk("About to call startup_cpu_idle_loop()\n");
> >      startup_cpu_idle_loop();
> >  }
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/arch/ia64/xen/xentime.c
> > --- a/xen/arch/ia64/xen/xentime.c   Wed Oct 12 14:03:22 2005
> > +++ b/xen/arch/ia64/xen/xentime.c   Wed Oct 12 14:12:44 2005
> > @@ -103,10 +103,10 @@
> >  #ifdef HEARTBEAT_FREQ
> >     static long count = 0;
> >     if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
> > -           printf("Heartbeat... iip=%p,psr.i=%d,pend=%d\n",
> > -                   regs->cr_iip,
> > +           printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
> > +                   regs->cr_iip /*,
> >                     VCPU(current,interrupt_delivery_enabled),
> > -                   VCPU(current,pending_interruption));
> > +                   VCPU(current,pending_interruption) */);
> >             count = 0;
> >     }
> >  #endif
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/include/asm-ia64/config.h
> > --- a/xen/include/asm-ia64/config.h Wed Oct 12 14:03:22 2005
> > +++ b/xen/include/asm-ia64/config.h Wed Oct 12 14:12:44 2005
> > @@ -28,8 +28,8 @@
> >  
> >  #ifdef CONFIG_XEN_SMP
> >  #define CONFIG_SMP 1
> > -#define NR_CPUS 2
> > -#define CONFIG_NR_CPUS 2
> > +#define NR_CPUS 8
> > +#define CONFIG_NR_CPUS 8
> >  #else
> >  #undef CONFIG_SMP
> >  #define NR_CPUS 1
> > @@ -123,8 +123,7 @@
> >  #ifdef CONFIG_SMP
> >  #warning "Lots of things to fix to enable CONFIG_SMP!"
> >  #endif
> > -// FIXME SMP
> > -#define    get_cpu()       0
> > +#define    get_cpu()       smp_processor_id()
> >  #define put_cpu()  do {} while(0)
> >  
> >  // needed for common/dom0_ops.c until hyperthreading is supported
> > @@ -140,6 +139,7 @@
> >  // function calls; see decl in xen/include/xen/sched.h
> >  #undef free_task_struct
> >  #undef alloc_task_struct
> > +#define get_thread_info(v) alloc_thread_info(v)
> >  
> >  // initial task has a different name in Xen
> >  //#define  idle0_task      init_task
> > @@ -299,7 +299,11 @@
> >  #endif /* __XEN_IA64_CONFIG_H__ */
> >  
> >  // needed for include/xen/smp.h
> > +#ifdef CONFIG_SMP
> > +#define __smp_processor_id()       current_thread_info()->cpu
> > +#else
> >  #define __smp_processor_id()       0
> > +#endif
> >  
> >  
> >  // FOLLOWING ADDED FOR XEN POST-NGIO and/or LINUX 2.6.7
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/include/asm-ia64/linux-xen/asm/spinlock.h
> > --- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h Wed Oct 12 14:03:22 2005
> > +++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h Wed Oct 12 14:12:44 2005
> > @@ -17,10 +17,15 @@
> >  #include <asm/intrinsics.h>
> >  #include <asm/system.h>
> >  
> > +#define DEBUG_SPINLOCK
> > +
> >  typedef struct {
> >     volatile unsigned int lock;
> >  #ifdef CONFIG_PREEMPT
> >     unsigned int break_lock;
> > +#endif
> > +#ifdef DEBUG_SPINLOCK
> > +   void *locker;
> >  #endif
> >  #ifdef XEN
> >     unsigned char recurse_cpu;
> > @@ -95,6 +100,10 @@
> >                   "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
> >                   : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
> >  # endif /* CONFIG_MCKINLEY */
> > +#endif
> > +
> > +#ifdef DEBUG_SPINLOCK
> > +   asm volatile ("mov %0=ip" : "=r" (lock->locker));
> >  #endif
> >  }
> >  #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
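
The new DEBUG_SPINLOCK field above records the instruction pointer of the
last successful locker, which is exactly the kind of breadcrumb that helps
with the SMP boot freeze Tristan mentions above.  A hypothetical way to
consume it from a stuck code path (not part of the patch):

    /* hypothetical debug helper: report who last took a contended lock */
    static inline void spin_lock_report(spinlock_t *lock)
    {
    #ifdef DEBUG_SPINLOCK
        printk("spinlock %p last taken at ip %p\n", lock, lock->locker);
    #endif
    }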
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/include/asm-ia64/linux-xen/linux/hardirq.h
> > --- a/xen/include/asm-ia64/linux-xen/linux/hardirq.h        Wed Oct 12 14:03:22 2005
> > +++ b/xen/include/asm-ia64/linux-xen/linux/hardirq.h        Wed Oct 12 14:12:44 2005
> > @@ -67,11 +67,7 @@
> >   */
> >  #define in_irq()           (hardirq_count())
> >  #define in_softirq()               (softirq_count())
> > -#ifdef XEN
> > -#define in_interrupt()             0               // FIXME SMP LATER
> > -#else
> >  #define in_interrupt()             (irq_count())
> > -#endif
> >  
> >  #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
> >  # define in_atomic()       ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/include/asm-ia64/linux-xen/linux/interrupt.h
> > --- a/xen/include/asm-ia64/linux-xen/linux/interrupt.h      Wed Oct 12 14:03:22 2005
> > +++ b/xen/include/asm-ia64/linux-xen/linux/interrupt.h      Wed Oct 12 14:12:44 2005
> > @@ -88,6 +88,7 @@
> >  #define save_and_cli(x)    save_and_cli(&x)
> >  #endif /* CONFIG_SMP */
> >  
> > +#ifndef XEN
> >  /* SoftIRQ primitives.  */
> >  #define local_bh_disable() \
> >             do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
> > @@ -95,6 +96,7 @@
> >             do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
> >  
> >  extern void local_bh_enable(void);
> > +#endif
> >  
> >  /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
> >     frequency threaded job scheduling. For almost all the purposes
> > diff -r d962821aa0e7 -r b4d53809ce79 xen/include/asm-ia64/vhpt.h
> > --- a/xen/include/asm-ia64/vhpt.h   Wed Oct 12 14:03:22 2005
> > +++ b/xen/include/asm-ia64/vhpt.h   Wed Oct 12 14:12:44 2005
> > @@ -14,6 +14,13 @@
> >  #define    VHPT_CACHE_ENTRY_SIZE_LOG2      6
> >  #define    VHPT_SIZE_LOG2                  26      //????
> >  #define    VHPT_PAGE_SHIFT                 26      //????
> > +#elif 1
> > +#define    VHPT_CACHE_ENTRY_SIZE           64
> > +#define    VHPT_CACHE_NUM_ENTRIES          2048
> > +#define    VHPT_NUM_ENTRIES                131072
> > +#define    VHPT_CACHE_MASK                 131071
> > +#define    VHPT_SIZE_LOG2                  22      //????
> > +#define    VHPT_PAGE_SHIFT                 22      //????
> >  #else
> >  //#define  VHPT_CACHE_NUM_ENTRIES          2048
> >  //#define  VHPT_NUM_ENTRIES                131072
> > 
> > 
> 

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel


 

