[Xen-devel] [RFC PATCH 1/2]: hypervisor debugger

Changes to xen code for the debugger.
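A quick build/usage sketch, pieced together from the hunks below; the exact
command lines are illustrative, and it assumes the kdb sources added by patch
2/2 live under xen/kdb, which the Makefile and Rules.mk hunks expect:

    # kdb is off by default (Rules.mk: "kdb ?= n"); build it in with:
    make xen kdb=y

    # To break into the debugger early during boot, append the new
    # "earlykdb" parameter (see the setup.c hunk) to the Xen command line:
    #     xen.gz ... earlykdb

    # At runtime, Ctrl-\ (0x1c) on the Xen serial console enters kdb
    # (see the console.c hunk); while a session is active, console input
    # is swallowed by the debugger until the hypervisor is resumed.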
diff -r 32034d1914a6 xen/Makefile
--- a/xen/Makefile      Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/Makefile      Wed Aug 29 14:39:57 2012 -0700
@@ -61,6 +61,7 @@
        $(MAKE) -f $(BASEDIR)/Rules.mk -C xsm clean
        $(MAKE) -f $(BASEDIR)/Rules.mk -C crypto clean
        $(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) clean
+       $(MAKE) -f $(BASEDIR)/Rules.mk -C kdb clean
        rm -f include/asm *.o $(TARGET) $(TARGET).gz $(TARGET)-syms *~ core
        rm -f include/asm-*/asm-offsets.h
        [ -d tools/figlet ] && rm -f .banner*
@@ -129,7 +130,7 @@
        echo ""; \
        echo "#endif") <$< >$@

-SUBDIRS = xsm arch/$(TARGET_ARCH) common drivers
+SUBDIRS = xsm arch/$(TARGET_ARCH) common drivers kdb
 define all_sources
     ( find include/asm-$(TARGET_ARCH) -name '*.h' -print; \
       find include -name 'asm-*' -prune -o -name '*.h' -print; \
diff -r 32034d1914a6 xen/Rules.mk
--- a/xen/Rules.mk      Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/Rules.mk      Wed Aug 29 14:39:57 2012 -0700
@@ -10,6 +10,7 @@
 crash_debug   ?= n
 frame_pointer ?= n
 lto           ?= n
+kdb           ?= n

 include $(XEN_ROOT)/Config.mk

@@ -40,6 +41,7 @@
 ALL_OBJS-y               += $(BASEDIR)/xsm/built_in.o
 ALL_OBJS-y               += $(BASEDIR)/arch/$(TARGET_ARCH)/built_in.o
 ALL_OBJS-$(x86)          += $(BASEDIR)/crypto/built_in.o
+ALL_OBJS-$(kdb)          += $(BASEDIR)/kdb/built_in.o

 CFLAGS-y                += -g -D__XEN__ -include $(BASEDIR)/include/xen/config.h
 CFLAGS-$(XSM_ENABLE)    += -DXSM_ENABLE
@@ -53,6 +55,7 @@
 CFLAGS-$(HAS_ACPI)      += -DHAS_ACPI
 CFLAGS-$(HAS_PASSTHROUGH) += -DHAS_PASSTHROUGH
 CFLAGS-$(frame_pointer) += -fno-omit-frame-pointer -DCONFIG_FRAME_POINTER
+CFLAGS-$(kdb)           += -DXEN_KDB_CONFIG

 ifneq ($(max_phys_cpus),)
 CFLAGS-y                += -DMAX_PHYS_CPUS=$(max_phys_cpus)
diff -r 32034d1914a6 xen/arch/x86/hvm/svm/entry.S
--- a/xen/arch/x86/hvm/svm/entry.S      Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/hvm/svm/entry.S      Wed Aug 29 14:39:57 2012 -0700
@@ -59,12 +59,23 @@
         get_current(bx)
         CLGI

+#ifdef XEN_KDB_CONFIG
+#if defined(__x86_64__)
+        testl $1, kdb_session_begun(%rip)
+#else
+        testl $1, kdb_session_begun
+#endif
+        jnz   .Lkdb_skip_softirq
+#endif
         mov   VCPU_processor(r(bx)),%eax
         shl   $IRQSTAT_shift,r(ax)
         lea   addr_of(irq_stat),r(dx)
         testl $~0,(r(dx),r(ax),1)
         jnz   .Lsvm_process_softirqs

+#ifdef XEN_KDB_CONFIG
+.Lkdb_skip_softirq:
+#endif
         testb $0, VCPU_nsvm_hap_enabled(r(bx))
 UNLIKELY_START(nz, nsvm_hap)
         mov   VCPU_nhvm_p2m(r(bx)),r(ax)
diff -r 32034d1914a6 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Aug 29 14:39:57 2012 -0700
@@ -2170,6 +2170,10 @@
         break;

     case VMEXIT_EXCEPTION_DB:
+#ifdef XEN_KDB_CONFIG
+        if (kdb_handle_trap_entry(TRAP_debug, regs))
+            break;
+#endif
         if ( !v->domain->debugger_attached )
             goto exit_and_crash;
         domain_pause_for_debugger();
@@ -2182,6 +2186,10 @@
         if ( (inst_len = __get_instruction_length(v, INSTR_INT3)) == 0 )
             break;
         __update_guest_eip(regs, inst_len);
+#ifdef XEN_KDB_CONFIG
+        if (kdb_handle_trap_entry(TRAP_int3, regs))
+            break;
+#endif
         current->arch.gdbsx_vcpu_event = TRAP_int3;
         domain_pause_for_debugger();
         break;
diff -r 32034d1914a6 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Wed Aug 29 14:39:57 2012 -0700
@@ -315,6 +315,36 @@
     register_keyhandler('v', &vmcb_dump_keyhandler);
 }

+#if defined(XEN_KDB_CONFIG)
+/* did == 0 : display for all HVM domains. domid 0 is never HVM.
+ * vid == -1 : display for all HVM VCPUs
+ */
+void kdb_dump_vmcb(domid_t did, int vid)
+{
+    struct domain *dp;
+    struct vcpu *vp;
+
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain (dp) {
+        if (!is_hvm_or_hyb_domain(dp) || dp->is_dying)
+            continue;
+        if (did != 0 && did != dp->domain_id)
+            continue;
+
+        for_each_vcpu (dp, vp) {
+            if (vid != -1 && vid != vp->vcpu_id)
+                continue;
+
+            kdbp("  VMCB [domid: %d  vcpu:%d]:\n", dp->domain_id, vp->vcpu_id);
+            svm_vmcb_dump("kdb", vp->arch.hvm_svm.vmcb);
+            kdbp("\n");
+        }
+        kdbp("\n");
+    }
+    rcu_read_unlock(&domlist_read_lock);
+}
+#endif
+
 /*
  * Local variables:
  * mode: C
diff -r 32034d1914a6 xen/arch/x86/hvm/vmx/entry.S
--- a/xen/arch/x86/hvm/vmx/entry.S      Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/entry.S      Wed Aug 29 14:39:57 2012 -0700
@@ -124,12 +124,23 @@
         get_current(bx)
         cli

+#ifdef XEN_KDB_CONFIG
+#if defined(__x86_64__)
+        testl $1, kdb_session_begun(%rip)
+#else
+        testl $1, kdb_session_begun
+#endif
+        jnz   .Lkdb_skip_softirq
+#endif
         mov   VCPU_processor(r(bx)),%eax
         shl   $IRQSTAT_shift,r(ax)
         lea   addr_of(irq_stat),r(dx)
         cmpl  $0,(r(dx),r(ax),1)
         jnz   .Lvmx_process_softirqs

+#ifdef XEN_KDB_CONFIG
+.Lkdb_skip_softirq:
+#endif
         testb $0xff,VCPU_vmx_emulate(r(bx))
         jnz  .Lvmx_goto_emulator
         testb $0xff,VCPU_vmx_realmode(r(bx))
diff -r 32034d1914a6 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Wed Aug 29 14:39:57 2012 -0700
@@ -1117,6 +1117,13 @@
         hvm_asid_flush_vcpu(v);
     }

+#if defined(XEN_KDB_CONFIG)
+    if (kdb_dr7)
+        __vmwrite(GUEST_DR7, kdb_dr7);
+    else
+        __vmwrite(GUEST_DR7, 0);
+#endif
+
     debug_state = v->domain->debugger_attached
                   || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3]
                   || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP];
@@ -1326,6 +1333,220 @@
     register_keyhandler('v', &vmcs_dump_keyhandler);
 }

+#if defined(XEN_KDB_CONFIG)
+#define GUEST_EFER      0x2806  /* see page 23-20 */
+#define GUEST_EFER_HIGH 0x2807  /* see page 23-20 */
+
+/* it's a shame we can't use vmcs_dump_vcpu(), but it does vmx_vmcs_enter which
+ * will IPI other CPUs. also, print a subset relevant to software debugging */
+static void noinline kdb_print_vmcs(struct vcpu *vp)
+{
+    struct cpu_user_regs *regs = &vp->arch.user_regs;
+    unsigned long long x;
+
+    kdbp("*** Guest State ***\n");
+    kdbp("CR0: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
+         (unsigned long long)vmr(GUEST_CR0),
+         (unsigned long long)vmr(CR0_READ_SHADOW),
+         (unsigned long long)vmr(CR0_GUEST_HOST_MASK));
+    kdbp("CR4: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
+         (unsigned long long)vmr(GUEST_CR4),
+         (unsigned long long)vmr(CR4_READ_SHADOW),
+         (unsigned long long)vmr(CR4_GUEST_HOST_MASK));
+    kdbp("CR3: actual=0x%016llx, target_count=%d\n",
+         (unsigned long long)vmr(GUEST_CR3),
+         (int)vmr(CR3_TARGET_COUNT));
+    kdbp("     target0=%016llx, target1=%016llx\n",
+         (unsigned long long)vmr(CR3_TARGET_VALUE0),
+         (unsigned long long)vmr(CR3_TARGET_VALUE1));
+    kdbp("     target2=%016llx, target3=%016llx\n",
+         (unsigned long long)vmr(CR3_TARGET_VALUE2),
+         (unsigned long long)vmr(CR3_TARGET_VALUE3));
+    kdbp("RSP = 0x%016llx (0x%016llx)  RIP = 0x%016llx (0x%016llx)\n",
+         (unsigned long long)vmr(GUEST_RSP),
+         (unsigned long long)regs->esp,
+         (unsigned long long)vmr(GUEST_RIP),
+         (unsigned long long)regs->eip);
+    kdbp("RFLAGS=0x%016llx (0x%016llx)  DR7 = 0x%016llx\n",
+         (unsigned long long)vmr(GUEST_RFLAGS),
+         (unsigned long long)regs->eflags,
+         (unsigned long long)vmr(GUEST_DR7));
+    kdbp("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n",
+         (unsigned long long)vmr(GUEST_SYSENTER_ESP),
+         (int)vmr(GUEST_SYSENTER_CS),
+         (unsigned long long)vmr(GUEST_SYSENTER_EIP));
+    vmx_dump_sel("CS", GUEST_CS_SELECTOR);
+    vmx_dump_sel("DS", GUEST_DS_SELECTOR);
+    vmx_dump_sel("SS", GUEST_SS_SELECTOR);
+    vmx_dump_sel("ES", GUEST_ES_SELECTOR);
+    vmx_dump_sel("FS", GUEST_FS_SELECTOR);
+    vmx_dump_sel("GS", GUEST_GS_SELECTOR);
+    vmx_dump_sel2("GDTR", GUEST_GDTR_LIMIT);
+    vmx_dump_sel("LDTR", GUEST_LDTR_SELECTOR);
+    vmx_dump_sel2("IDTR", GUEST_IDTR_LIMIT);
+    vmx_dump_sel("TR", GUEST_TR_SELECTOR);
+    kdbp("Guest EFER = 0x%08x%08x\n",
+         (uint32_t)vmr(GUEST_EFER_HIGH), (uint32_t)vmr(GUEST_EFER));
+    kdbp("Guest PAT = 0x%08x%08x\n",
+         (uint32_t)vmr(GUEST_PAT_HIGH), (uint32_t)vmr(GUEST_PAT));
+    x  = (unsigned long long)vmr(TSC_OFFSET_HIGH) << 32;
+    x |= (uint32_t)vmr(TSC_OFFSET);
+    kdbp("TSC Offset = %016llx\n", x);
+    x  = (unsigned long long)vmr(GUEST_IA32_DEBUGCTL_HIGH) << 32;
+    x |= (uint32_t)vmr(GUEST_IA32_DEBUGCTL);
+    kdbp("DebugCtl=%016llx DebugExceptions=%016llx\n", x,
+         (unsigned long long)vmr(GUEST_PENDING_DBG_EXCEPTIONS));
+    kdbp("Interruptibility=%04x ActivityState=%04x\n",
+         (int)vmr(GUEST_INTERRUPTIBILITY_INFO),
+         (int)vmr(GUEST_ACTIVITY_STATE));
+
+    kdbp("MSRs: entry_load:$%d exit_load:$%d exit_store:$%d\n",
+         vmr(VM_ENTRY_MSR_LOAD_COUNT), vmr(VM_EXIT_MSR_LOAD_COUNT),
+         vmr(VM_EXIT_MSR_STORE_COUNT));
+
+    kdbp("\n*** Host State ***\n");
+    kdbp("RSP = 0x%016llx  RIP = 0x%016llx\n",
+         (unsigned long long)vmr(HOST_RSP),
+         (unsigned long long)vmr(HOST_RIP));
+    kdbp("CS=%04x DS=%04x ES=%04x FS=%04x GS=%04x SS=%04x TR=%04x\n",
+         (uint16_t)vmr(HOST_CS_SELECTOR),
+         (uint16_t)vmr(HOST_DS_SELECTOR),
+         (uint16_t)vmr(HOST_ES_SELECTOR),
+         (uint16_t)vmr(HOST_FS_SELECTOR),
+         (uint16_t)vmr(HOST_GS_SELECTOR),
+         (uint16_t)vmr(HOST_SS_SELECTOR),
+         (uint16_t)vmr(HOST_TR_SELECTOR));
+    kdbp("FSBase=%016llx GSBase=%016llx TRBase=%016llx\n",
+         (unsigned long long)vmr(HOST_FS_BASE),
+         (unsigned long long)vmr(HOST_GS_BASE),
+         (unsigned long long)vmr(HOST_TR_BASE));
+    kdbp("GDTBase=%016llx IDTBase=%016llx\n",
+         (unsigned long long)vmr(HOST_GDTR_BASE),
+         (unsigned long long)vmr(HOST_IDTR_BASE));
+    kdbp("CR0=%016llx CR3=%016llx CR4=%016llx\n",
+         (unsigned long long)vmr(HOST_CR0),
+         (unsigned long long)vmr(HOST_CR3),
+         (unsigned long long)vmr(HOST_CR4));
+    kdbp("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n",
+         (unsigned long long)vmr(HOST_SYSENTER_ESP),
+         (int)vmr(HOST_SYSENTER_CS),
+         (unsigned long long)vmr(HOST_SYSENTER_EIP));
+    kdbp("Host PAT = 0x%08x%08x\n",
+         (uint32_t)vmr(HOST_PAT_HIGH), (uint32_t)vmr(HOST_PAT));
+
+    kdbp("\n*** Control State ***\n");
+    kdbp("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
+         (uint32_t)vmr(PIN_BASED_VM_EXEC_CONTROL),
+         (uint32_t)vmr(CPU_BASED_VM_EXEC_CONTROL),
+         (uint32_t)vmr(SECONDARY_VM_EXEC_CONTROL));
+    kdbp("EntryControls=%08x ExitControls=%08x\n",
+         (uint32_t)vmr(VM_ENTRY_CONTROLS),
+         (uint32_t)vmr(VM_EXIT_CONTROLS));
+    kdbp("ExceptionBitmap=%08x\n",
+         (uint32_t)vmr(EXCEPTION_BITMAP));
+    kdbp("PAGE_FAULT_ERROR_CODE  MASK:0x%lx  MATCH:0x%lx\n",
+         (unsigned long)vmr(PAGE_FAULT_ERROR_CODE_MASK),
+         (unsigned long)vmr(PAGE_FAULT_ERROR_CODE_MATCH));
+    kdbp("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
+         (uint32_t)vmr(VM_ENTRY_INTR_INFO),
+         (uint32_t)vmr(VM_ENTRY_EXCEPTION_ERROR_CODE),
+         (uint32_t)vmr(VM_ENTRY_INSTRUCTION_LEN));
+    kdbp("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
+         (uint32_t)vmr(VM_EXIT_INTR_INFO),
+         (uint32_t)vmr(VM_EXIT_INTR_ERROR_CODE),
+         (uint32_t)vmr(VM_ENTRY_INSTRUCTION_LEN));
+    kdbp("        reason=%08x qualification=%08x\n",
+         (uint32_t)vmr(VM_EXIT_REASON),
+         (uint32_t)vmr(EXIT_QUALIFICATION));
+    kdbp("IDTVectoring: info=%08x errcode=%08x\n",
+         (uint32_t)vmr(IDT_VECTORING_INFO),
+         (uint32_t)vmr(IDT_VECTORING_ERROR_CODE));
+    kdbp("TPR Threshold = 0x%02x\n",
+         (uint32_t)vmr(TPR_THRESHOLD));
+    kdbp("EPT pointer = 0x%08x%08x\n",
+         (uint32_t)vmr(EPT_POINTER_HIGH), (uint32_t)vmr(EPT_POINTER));
+    kdbp("Virtual processor ID = 0x%04x\n",
+         (uint32_t)vmr(VIRTUAL_PROCESSOR_ID));
+    kdbp("=================================================================\n");
+}
+
+/* Flush VMCS on this cpu if it needs to:
+ *  - Upon leaving kdb, the HVM cpu will resume in vmx_vmexit_handler() and
+ *    do __vmreads. So, the VMCS pointer can't be left cleared.
+ *  - Doing __vmpclear will set the vmx state to 'clear', so to resume a
+ *    vmlaunch must be done and not vmresume. This means, we must clear
+ *    arch_vmx->launched.
+ */
+void kdb_curr_cpu_flush_vmcs(void)
+{
+    struct domain *dp;
+    struct vcpu *vp;
+    int ccpu = smp_processor_id();
+    struct vmcs_struct *cvp = this_cpu(current_vmcs);
+
+    if (this_cpu(current_vmcs) == NULL)
+        return;                     /* no HVM active on this CPU */
+
+    kdbp("KDB:[%d] curvmcs:%lx/%lx\n", ccpu, cvp, virt_to_maddr(cvp));
+
+    /* looks like we got one. unfortunately, current_vmcs points to vmcs
+     * and not VCPU, so we gotta search the entire list...
+     */
+    for_each_domain (dp) {
+        if ( !(is_hvm_or_hyb_domain(dp)) || dp->is_dying)
+            continue;
+        for_each_vcpu (dp, vp) {
+            if ( vp->arch.hvm_vmx.vmcs == cvp ) {
+                __vmpclear(virt_to_maddr(vp->arch.hvm_vmx.vmcs));
+                __vmptrld(virt_to_maddr(vp->arch.hvm_vmx.vmcs));
+                vp->arch.hvm_vmx.launched = 0;
+                this_cpu(current_vmcs) = NULL;
+                kdbp("KDB:[%d] %d:%d current_vmcs:%lx flushed\n",
+                     ccpu, dp->domain_id, vp->vcpu_id, cvp, virt_to_maddr(cvp));
+            }
+        }
+    }
+}
+
+/*
+ * domid == 0   : display for all HVM domains (dom0 is never an HVM domain)
+ * vcpu id == -1 : display all vcpuids
+ * PreCondition: all HVM cpus (including current cpu) have flushed VMCS
+ */
+void kdb_dump_vmcs(domid_t did, int vid)
+{
+    struct domain *dp;
+    struct vcpu *vp;
+    struct vmcs_struct *vmcsp;
+    u64 addr = -1;
+
+    ASSERT(!local_irq_is_enabled());    /* kdb should always run disabled */
+    __vmptrst(&addr);
+
+    for_each_domain (dp) {
+        if ( !(is_hvm_or_hyb_domain(dp)) || dp->is_dying)
+            continue;
+        if (did != 0 && did != dp->domain_id)
+            continue;
+
+        for_each_vcpu (dp, vp) {
+            if (vid != -1 && vid != vp->vcpu_id)
+                continue;
+
+            vmcsp = vp->arch.hvm_vmx.vmcs;
+            kdbp("VMCS %lx/%lx [domid:%d (%p)  vcpu:%d (%p)]:\n", vmcsp,
+                 virt_to_maddr(vmcsp), dp->domain_id, dp, vp->vcpu_id, vp);
+            __vmptrld(virt_to_maddr(vmcsp));
+            kdb_print_vmcs(vp);
+            __vmpclear(virt_to_maddr(vmcsp));
+            vp->arch.hvm_vmx.launched = 0;
+        }
+        kdbp("\n");
+    }
+    /* restore orig vmcs pointer for __vmreads in vmx_vmexit_handler() */
+    if (addr && addr != (u64)-1)
+        __vmptrld(addr);
+}
+#endif

 /*
  * Local variables:
diff -r 32034d1914a6 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Aug 29 14:39:57 2012 -0700
@@ -2183,11 +2183,14 @@
         printk("reason not known yet!");
         break;
     }
-
+#if defined(XEN_KDB_CONFIG)
+    kdbp("\n************* VMCS Area **************\n");
+    kdb_dump_vmcs(curr->domain->domain_id, (curr)->vcpu_id);
+#else
     printk("************* VMCS Area **************\n");
     vmcs_dump_vcpu(curr);
     printk("**************************************\n");
-
+#endif
     domain_crash(curr->domain);
 }

@@ -2415,6 +2418,12 @@
             write_debugreg(6, exit_qualification | 0xffff0ff0);
             if ( !v->domain->debugger_attached || cpu_has_monitor_trap_flag )
                 goto exit_and_crash;
+
+#if defined(XEN_KDB_CONFIG)
+            /* TRAP_debug: IP points correctly to next instr */
+            if (kdb_handle_trap_entry(vector, regs))
+                break;
+#endif
             domain_pause_for_debugger();
             break;
         case TRAP_int3:
@@ -2423,6 +2432,13 @@
             if ( v->domain->debugger_attached )
             {
                 update_guest_eip(); /* Safe: INT3 */
+#if defined(XEN_KDB_CONFIG)
+                /* vmcs.IP points to bp, kdb expects bp+1. Hence after the above
+                 * update_guest_eip which updates to bp+1. works for gdbsx too
+                 */
+                if (kdb_handle_trap_entry(vector, regs))
+                    break;
+#endif
                 current->arch.gdbsx_vcpu_event = TRAP_int3;
                 domain_pause_for_debugger();
                 break;
@@ -2707,6 +2723,10 @@
     case EXIT_REASON_MONITOR_TRAP_FLAG:
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
         vmx_update_cpu_exec_control(v);
+#if defined(XEN_KDB_CONFIG)
+        if (kdb_handle_trap_entry(TRAP_debug, regs))
+            break;
+#endif
         if ( v->arch.hvm_vcpu.single_step ) {
             hvm_memory_event_single_step(regs->eip);
             if ( v->domain->debugger_attached )
diff -r 32034d1914a6 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/irq.c        Wed Aug 29 14:39:57 2012 -0700
@@ -2305,3 +2305,29 @@
     return is_hvm_domain(d) && pirq &&
            pirq->arch.hvm.emuirq != IRQ_UNBOUND;
 }
+
+#ifdef XEN_KDB_CONFIG
+void kdb_prnt_guest_mapped_irqs(void)
+{
+    int irq, j;
+    char affstr[NR_CPUS/4+NR_CPUS/32+2];    /* courtesy dump_irqs() */
+
+    kdbp("irq vec aff type domid:mapped-pirq pairs (all in decimal)\n");
+    for (irq=0; irq < nr_irqs; irq++) {
+        irq_desc_t *dp = irq_to_desc(irq);
+        struct arch_irq_desc *archp = &dp->arch;
+        irq_guest_action_t *actp = (irq_guest_action_t *)dp->action;
+
+        if (!dp->handler ||dp->handler==&no_irq_type || !(dp->status&IRQ_GUEST))
+            continue;
+
+        cpumask_scnprintf(affstr, sizeof(affstr), dp->affinity);
+        kdbp("[%3ld] %3d %3s %-13s ", irq, archp->vector, affstr,
+             dp->handler->typename);
+        for (j=0; j < actp->nr_guests; j++)
+            kdbp("%03d:%04d ", actp->guest[j]->domain_id,
+                 domain_irq_to_pirq(actp->guest[j], irq));
+        kdbp("\n");
+    }
+}
+#endif
diff -r 32034d1914a6 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/setup.c      Wed Aug 29 14:39:57 2012 -0700
@@ -47,6 +47,13 @@
 #include <xen/cpu.h>
 #include <asm/nmi.h>

+#ifdef XEN_KDB_CONFIG
+#include <asm/debugger.h>
+
+int opt_earlykdb=0;
+boolean_param("earlykdb", opt_earlykdb);
+#endif
+
 /* opt_nosmp: If true, secondary processors are ignored. */
 static bool_t __initdata opt_nosmp;
 boolean_param("nosmp", opt_nosmp);
@@ -1242,6 +1249,11 @@

     trap_init();

+#ifdef XEN_KDB_CONFIG
+    kdb_init();
+    if (opt_earlykdb)
+        kdb_trap_immed(KDB_TRAP_NONFATAL);
+#endif
     rcu_init();

     early_time_init();
diff -r 32034d1914a6 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/smp.c        Wed Aug 29 14:39:57 2012 -0700
@@ -273,7 +273,7 @@
  * Structure and data for smp_call_function()/on_selected_cpus().
  */
-static void __smp_call_function_interrupt(void);
+static void __smp_call_function_interrupt(struct cpu_user_regs *regs);
 static DEFINE_SPINLOCK(call_lock);
 static struct call_data_struct {
     void (*func) (void *info);
@@ -321,7 +321,7 @@
     if ( cpumask_test_cpu(smp_processor_id(), &call_data.selected) )
     {
         local_irq_disable();
-        __smp_call_function_interrupt();
+        __smp_call_function_interrupt(NULL);
         local_irq_enable();
     }

@@ -390,7 +390,7 @@
     this_cpu(irq_count)++;
 }

-static void __smp_call_function_interrupt(void)
+static void __smp_call_function_interrupt(struct cpu_user_regs *regs)
 {
     void (*func)(void *info) = call_data.func;
     void *info = call_data.info;
@@ -411,6 +411,11 @@
     {
         mb();
         cpumask_clear_cpu(cpu, &call_data.selected);
+#ifdef XEN_KDB_CONFIG
+        if (info && !strcmp(info, "XENKDB")) {      /* called from kdb */
+            (*(void (*)(struct cpu_user_regs *, void *))func)(regs, info);
+        } else
+#endif
         (*func)(info);
     }

@@ -421,5 +426,5 @@
 {
     ack_APIC_irq();
     perfc_incr(ipis);
-    __smp_call_function_interrupt();
+    __smp_call_function_interrupt(regs);
 }
diff -r 32034d1914a6 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/time.c       Wed Aug 29 14:39:57 2012 -0700
@@ -2007,6 +2007,46 @@
 }
 __initcall(setup_dump_softtsc);

+#ifdef XEN_KDB_CONFIG
+void kdb_time_resume(int update_domains)
+{
+    s_time_t now;
+    int ccpu = smp_processor_id();
+    struct cpu_time *t = &this_cpu(cpu_time);
+
+    if (!plt_src.read_counter)          /* not initialized for earlykdb */
+        return;
+
+    if (update_domains) {
+        plt_stamp = plt_src.read_counter();
+        platform_timer_stamp = plt_stamp64;
+        platform_time_calibration();
+        do_settime(get_cmos_time(), 0, read_platform_stime());
+    }
+    if (local_irq_is_enabled())
+        kdbp("kdb BUG: enabled in time_resume(). ccpu:%d\n", ccpu);
+
+    rdtscll(t->local_tsc_stamp);
+    now = read_platform_stime();
+    t->stime_master_stamp = now;
+    t->stime_local_stamp = now;
+
+    update_vcpu_system_time(current);
+
+    if (update_domains)
+        set_timer(&calibration_timer, NOW() + EPOCH);
+}
+
+void kdb_dump_time_pcpu(void)
+{
+    int cpu;
+    for_each_online_cpu(cpu) {
+        kdbp("[%d]: cpu_time: %016lx\n", cpu, &per_cpu(cpu_time, cpu));
+        kdbp("[%d]: cpu_calibration: %016lx\n", cpu,
+             &per_cpu(cpu_calibration, cpu));
+    }
+}
+#endif

 /*
  * Local variables:
  * mode: C
diff -r 32034d1914a6 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/traps.c      Wed Aug 29 14:39:57 2012 -0700
@@ -225,7 +225,7 @@

 #else

-static void show_trace(struct cpu_user_regs *regs)
+void show_trace(struct cpu_user_regs *regs)
 {
     unsigned long *frame, next, addr, low, high;

@@ -3326,6 +3326,10 @@
     if ( nmi_callback(regs, cpu) )
         return;

+#ifdef XEN_KDB_CONFIG
+    if (kdb_enabled && kdb_handle_trap_entry(TRAP_nmi, regs))
+        return;
+#endif
     if ( nmi_watchdog )
         nmi_watchdog_tick(regs);
diff -r 32034d1914a6 xen/arch/x86/x86_64/compat/entry.S
--- a/xen/arch/x86/x86_64/compat/entry.S        Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/x86_64/compat/entry.S        Wed Aug 29 14:39:57 2012 -0700
@@ -95,6 +95,10 @@
 /* %rbx: struct vcpu */
 ENTRY(compat_test_all_events)
         cli                             # tests must not race interrupts
+#ifdef XEN_KDB_CONFIG
+        testl $1, kdb_session_begun(%rip)
+        jnz   compat_restore_all_guest
+#endif
 /*compat_test_softirqs:*/
         movl  VCPU_processor(%rbx),%eax
         shlq  $IRQSTAT_shift,%rax
diff -r 32034d1914a6 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/arch/x86/x86_64/entry.S       Wed Aug 29 14:39:57 2012 -0700
@@ -184,6 +184,10 @@
 /* %rbx: struct vcpu */
 test_all_events:
         cli                             # tests must not race interrupts
+#ifdef XEN_KDB_CONFIG                   /* 64bit dom0 will resume here */
+        testl $1, kdb_session_begun(%rip)
+        jnz   restore_all_guest
+#endif
 /*test_softirqs:*/
         movl  VCPU_processor(%rbx),%eax
         shl   $IRQSTAT_shift,%rax
@@ -546,6 +550,13 @@
 ENTRY(int3)
         pushq $0
+#ifdef XEN_KDB_CONFIG
+        pushq %rax
+        GET_CPUINFO_FIELD(CPUINFO_processor_id, %rax)
+        movq  (%rax), %rax
+        lock bts %rax, kdb_cpu_traps(%rip)
+        popq  %rax
+#endif
         movl  $TRAP_int3,4(%rsp)
         jmp   handle_exception
diff -r 32034d1914a6 xen/common/domain.c
--- a/xen/common/domain.c       Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/common/domain.c       Wed Aug 29 14:39:57 2012 -0700
@@ -530,6 +530,14 @@
 {
     struct vcpu *v;

+#ifdef XEN_KDB_CONFIG
+    if (reason == SHUTDOWN_crash) {
+        if ( IS_PRIV(d) )
+            kdb_trap_immed(KDB_TRAP_FATAL);
+        else
+            kdb_trap_immed(KDB_TRAP_NONFATAL);
+    }
+#endif
     spin_lock(&d->shutdown_lock);

     if ( d->shutdown_code == -1 )
@@ -624,7 +632,9 @@
     for_each_vcpu ( d, v )
         vcpu_sleep_nosync(v);

-    send_global_virq(VIRQ_DEBUGGER);
+    /* send VIRQ_DEBUGGER to guest only if gdbsx_vcpu_event is not active */
+    if (current->arch.gdbsx_vcpu_event == 0)
+        send_global_virq(VIRQ_DEBUGGER);
 }

 /* Complete domain destroy after RCU readers are not holding old references. */
diff -r 32034d1914a6 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/common/sched_credit.c Wed Aug 29 14:39:57 2012 -0700
@@ -1475,6 +1475,33 @@
     printk("\n");
 }

+#ifdef XEN_KDB_CONFIG
+static void kdb_csched_dump(int cpu)
+{
+    struct csched_pcpu *pcpup = CSCHED_PCPU(cpu);
+    struct vcpu *scurrvp = (CSCHED_VCPU(current))->vcpu;
+    struct list_head *tmp, *runq = RUNQ(cpu);
+
+    kdbp("  csched_pcpu: %p\n", pcpup);
+    kdbp("  curr csched:%p {vcpu:%p id:%d domid:%d}\n", (current)->sched_priv,
+         scurrvp, scurrvp->vcpu_id, scurrvp->domain->domain_id);
+    kdbp("  runq:\n");
+
+    /* next is top of struct, so screw stupid, ugly hard to follow macros */
+    if (offsetof(struct csched_vcpu, runq_elem.next) != 0) {
+        kdbp("next is not first in struct csched_vcpu. please fixme\n");
+        return;                         /* otherwise for loop will crash */
+    }
+    for (tmp = runq->next; tmp != runq; tmp = tmp->next) {
+
+        struct csched_vcpu *csp = (struct csched_vcpu *)tmp;
+        struct vcpu *vp = csp->vcpu;
+        kdbp("    csp:%p pri:%02d vcpu: {p:%p id:%d domid:%d}\n", csp,
+             csp->pri, vp, vp->vcpu_id, vp->domain->domain_id);
+    };
+}
+#endif
+
 static void
 csched_dump_pcpu(const struct scheduler *ops, int cpu)
 {
@@ -1484,6 +1511,10 @@
     int loop;
 #define cpustr keyhandler_scratch

+#ifdef XEN_KDB_CONFIG
+    kdb_csched_dump(cpu);
+    return;
+#endif
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
diff -r 32034d1914a6 xen/common/schedule.c
--- a/xen/common/schedule.c     Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/common/schedule.c     Wed Aug 29 14:39:57 2012 -0700
@@ -1454,6 +1454,25 @@
     schedule();
 }

+#ifdef XEN_KDB_CONFIG
+void kdb_print_sched_info(void)
+{
+    int cpu;
+
+    kdbp("Scheduler: name:%s opt_name:%s id:%d\n", ops.name, ops.opt_name,
+         ops.sched_id);
+    kdbp("per cpu schedule_data:\n");
+    for_each_online_cpu(cpu) {
+        struct schedule_data *p = &per_cpu(schedule_data, cpu);
+        kdbp("  cpu:%d &(per cpu)schedule_data:%p\n", cpu, p);
+        kdbp("    curr:%p sched_priv:%p\n", p->curr, p->sched_priv);
+        kdbp("\n");
+        ops.dump_cpu_state(&ops, cpu);
+        kdbp("\n");
+    }
+}
+#endif
+
 #ifdef CONFIG_COMPAT
 #include "compat/schedule.c"
 #endif
diff -r 32034d1914a6 xen/common/symbols.c
--- a/xen/common/symbols.c      Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/common/symbols.c      Wed Aug 29 14:39:57 2012 -0700
@@ -168,3 +168,21 @@

     spin_unlock_irqrestore(&lock, flags);
 }
+
+#ifdef XEN_KDB_CONFIG
+/*
+ * Given a symbol, return its address
+ */
+unsigned long address_lookup(char *symp)
+{
+    int i, off = 0;
+    char namebuf[KSYM_NAME_LEN+1];
+
+    for (i=0; i < symbols_num_syms; i++) {
+        off = symbols_expand_symbol(off, namebuf);
+        if (strcmp(namebuf, symp) == 0)     /* found it */
+            return symbols_address(i);
+    }
+    return 0;
+}
+#endif
diff -r 32034d1914a6 xen/common/timer.c
--- a/xen/common/timer.c        Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/common/timer.c        Wed Aug 29 14:39:57 2012 -0700
@@ -643,6 +643,48 @@
     register_keyhandler('a', &dump_timerq_keyhandler);
 }

+#ifdef XEN_KDB_CONFIG
+#include <xen/symbols.h>
+void kdb_dump_timer_queues(void)
+{
+    struct timer *t;
+    struct timers *ts;
+    unsigned long sz, offs;
+    char buf[KSYM_NAME_LEN+1];
+    int cpu, j;
+    u64 tsc;
+
+    for_each_online_cpu( cpu )
+    {
+        ts = &per_cpu(timers, cpu);
+        kdbp("CPU[%02d]:", cpu);
+
+        if (cpu == smp_processor_id()) {
+            s_time_t now = NOW();
+            rdtscll(tsc);
+            kdbp("NOW:0x%08x%08x TSC:0x%016lx\n", (u32)(now>>32),(u32)now, tsc);
+        } else
+            kdbp("\n");
+
+        /* timers in the heap */
+        for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ ) {
+            t = ts->heap[j];
+            kdbp("  %d: exp=0x%08x%08x fn:%s data:%p\n",
+                 j, (u32)(t->expires>>32), (u32)t->expires,
+                 symbols_lookup((unsigned long)t->function, &sz, &offs, buf),
+                 t->data);
+        }
+        /* timers on the link list */
+        for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ ) {
+            kdbp("  L%d: exp=0x%08x%08x fn:%s data:%p\n",
+                 j, (u32)(t->expires>>32), (u32)t->expires,
+                 symbols_lookup((unsigned long)t->function, &sz, &offs, buf),
+                 t->data);
+        }
+    }
+}
+#endif
+
 /*
  * Local variables:
  * mode: C
diff -r 32034d1914a6 xen/drivers/char/console.c
--- a/xen/drivers/char/console.c        Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/drivers/char/console.c        Wed Aug 29 14:39:57 2012 -0700
@@ -295,6 +295,21 @@
 {
     static int switch_code_count = 0;

+#ifdef XEN_KDB_CONFIG
+    /* if ctrl-\ pressed and kdb handles it, return */
+    if (kdb_enabled && c == 0x1c) {
+        if (!kdb_session_begun) {
+            if (kdb_keyboard(regs))
+                return;
+        } else {
+            kdbp("Sorry... kdb session already active.. please try again..\n");
+            return;
+        }
+    }
+    if (kdb_session_begun)      /* kdb should already be polling */
+        return;                 /* swallow chars so they don't buffer in dom0 */
+#endif
+
     if ( switch_code && (c == switch_code) )
     {
         /* We eat CTRL-<switch_char> in groups of 3 to switch console input. */
@@ -710,6 +725,18 @@
     atomic_dec(&print_everything);
 }

+#ifdef XEN_KDB_CONFIG
+void console_putc(char c)
+{
+    serial_putc(sercon_handle, c);
+}
+
+int console_getc(void)
+{
+    return serial_getc(sercon_handle);
+}
+#endif
+
 /*
  * printk rate limiting, lifted from Linux.
  *
diff -r 32034d1914a6 xen/include/asm-x86/debugger.h
--- a/xen/include/asm-x86/debugger.h    Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/include/asm-x86/debugger.h    Wed Aug 29 14:39:57 2012 -0700
@@ -39,7 +39,11 @@
 #define DEBUGGER_trap_fatal(_v, _r) \
     if ( debugger_trap_fatal(_v, _r) ) return;

-#if defined(CRASH_DEBUG)
+#if defined(XEN_KDB_CONFIG)
+#define debugger_trap_immediate() kdb_trap_immed(KDB_TRAP_NONFATAL)
+#define debugger_trap_fatal(_v, _r) kdb_trap_fatal(_v, _r)
+
+#elif defined(CRASH_DEBUG)

 #include <xen/gdbstub.h>

@@ -70,6 +74,10 @@
 {
     struct vcpu *v = current;

+#ifdef XEN_KDB_CONFIG
+    if (kdb_handle_trap_entry(vector, regs))
+        return 1;
+#endif
     if ( guest_kernel_mode(v, regs) &&
          v->domain->debugger_attached &&
          ((vector == TRAP_int3) || (vector == TRAP_debug)) )
     {
diff -r 32034d1914a6 xen/include/xen/lib.h
--- a/xen/include/xen/lib.h     Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/include/xen/lib.h     Wed Aug 29 14:39:57 2012 -0700
@@ -116,4 +116,7 @@
 struct cpu_user_regs;
 void dump_execstate(struct cpu_user_regs *);

+#ifdef XEN_KDB_CONFIG
+#include "../../kdb/include/kdb_extern.h"
+#endif
 #endif /* __LIB_H__ */
diff -r 32034d1914a6 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Thu Jun 07 19:46:57 2012 +0100
+++ b/xen/include/xen/sched.h   Wed Aug 29 14:39:57 2012 -0700
@@ -576,11 +576,14 @@
 unsigned long hypercall_create_continuation(
     unsigned int op, const char *format, ...);
 void hypercall_cancel_continuation(void);
-
+#ifdef XEN_KDB_CONFIG
+#define hypercall_preempt_check() (0)
+#else
 #define hypercall_preempt_check() (unlikely(    \
         softirq_pending(smp_processor_id()) |   \
         local_events_need_delivery()            \
     ))
+#endif

 extern struct domain *domain_list;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel