Re: [Xen-devel] [PATCH 0 of 6] [RFC] Use wait queues for paging, v2
>
> On Feb 27, 2012, at 11:51 AM, Olaf Hering wrote:
>
>> On Sun, Feb 26, Olaf Hering wrote:
>>
>>> On Thu, Feb 23, Tim Deegan wrote:
>>>
>>>> This is v2 of the patch I posted last week, after feedback from
>>>> Andres.
>>>
>>> Tried this series, but processes in dom0 started to hang in D state
>>> when a paged guest is started. I will see if I can spot the error.
>>
>> This change for patch #5 is needed, especially the first part.
>> Now it appears to work.
>>
>> Olaf

Unfortunately, I get all kinds of breakage on Win7 domains with Citrix PV
drivers 6.0, so it's a no-go from my end for now. The PV drivers issue
memory-op hypercalls against pages that are paged out, and the hypervisor
WARNs and crashes the domain. I applied Tim's six-patch series plus Olaf's
fix. Xen WARNs below.

Andres

(XEN) Xen WARN at p2m.c:200
(XEN) ----[ Xen-4.2-unstable x86_64 debug=y Not tainted ]----
(XEN) CPU: 4
(XEN) RIP: e008:[<ffff82c4801e08e2>] __get_gfn_type_access+0x230/0x3ee
(XEN) RFLAGS: 0000000000010202 CONTEXT: hypervisor
(XEN) rax: 0000000000000001 rbx: 000000000000000b rcx: ffff83083ffaff18
(XEN) rdx: 00000043bfcac680 rsi: 0000000000000000 rdi: 000000183f775000
(XEN) rbp: ffff83083ffaf9a8 rsp: ffff83083ffaf928 r8: 0000000000000001
(XEN) r9: 0000000000000000 r10: 000000000000000c r11: fffff88002fe3b0e
(XEN) r12: ffff83183f7a1bb0 r13: ffff83083ffaf9dc r14: 0000000000000000
(XEN) r15: 00000000000033c0 cr0: 000000008005003b cr4: 00000000000026f0
(XEN) cr3: 000000183f793000 cr2: 000000007724f4df
(XEN) ds: 0000 es: 0000 fs: 0000 gs: 0000 ss: 0000 cs: e008
(XEN) Xen stack trace from rsp=ffff83083ffaf928:
(XEN)    ffff83083ffaff18 ffff83083ffaff18 0000000000000000 000000018012635a
(XEN)    ffff83083ffaf9d8 ffff83083ffaff18 ffff82c48030a760 ffff82c48030a760
(XEN)    ffff82c48030a760 ffff82c48030a760 01ff82c4801264b3 0000000000000000
(XEN)    ffff83183f7a3000 ffff83083ffafa28 0000000000000000 000000000183f7b9
(XEN)    ffff83083ffafa08 ffff82c480170b91 ffff830000000001 ffff83183f7a3010
(XEN)    0000000000000000 ffff82c4801b3d94 0000000b00000007 ffff83183f7a3000
(XEN)    fffff88002fe3b80 fffff88002fe3b80 ffff8300bf2ea000 ffff82c4801b5344
(XEN)    ffff83083ffafa88 ffff82c480170e4b 8b13687800000000 7d4d94af0000009f
(XEN)    0000000100007ff0 0000000000000000 00000000000033c0 ffff82c48015a3ff
(XEN)    0000000000000202 ffff83183f7a3000 ffff82c4801265c1 0000000000000008
(XEN)    0000000000000007 fffff88002fe3b80 ffff8300bf2ea000 ffff82c4801b5344
(XEN)    ffff83083ffafc78 ffff82c480114c72 0000000000000286 ffff83083ffafab8
(XEN)    ffff82c4801265c1 ffff83183f6a19a0 ffff83083ffafae8 ffff82c48012daa1
(XEN)    ffff83083ffafb18 0000000000000040 ffff83183f6a1990 ffff83183f7a3000
(XEN)    ffff83083ffafb18 ffff82c48012dbc5 0000000000000040 ffff83083ffeffd0
(XEN)    ffff83183f7a3000 ffff82c4803099c0 ffff83083ffafb68 ffff82c480105e63
(XEN)    0000000000000003 0000000000000000 ffff83183f6a19d0 ffff83083ffb6048
(XEN)    ffff83183f7a1bb0 0000000000000001 0000000000000001 0000000000000001
(XEN)    ffff83083ffafbf8 ffff82c4801e6f0d 00000000000000ff 0000000300000000
(XEN)    ffff83083ffafbc8 ffff83083ffafbc0 0000000000000000 ffff83083ffafcd8
(XEN) Xen call trace:
(XEN)    [<ffff82c4801e08e2>] __get_gfn_type_access+0x230/0x3ee
(XEN)    [<ffff82c480170b91>] xenmem_add_to_physmap_once+0x610/0x773
(XEN)    [<ffff82c480170e4b>] arch_memory_op+0x157/0xc7a
(XEN)    [<ffff82c480114c72>] do_memory_op+0x1d60/0x1dce
(XEN)    [<ffff82c4801b5409>] hvm_memory_op+0x62/0x6a
(XEN)    [<ffff82c4801b84f1>] hvm_do_hypercall+0x1af/0x2f5
(XEN)    [<ffff82c4801d73a5>] vmx_vmexit_handler+0xef3/0x1849
(XEN)
(XEN) domain_crash called from p2m.c:200
(XEN) Domain 2 (vcpu#0) crashed on cpu#4:
(XEN) ----[ Xen-4.2-unstable x86_64 debug=y Not tainted ]----
(XEN) CPU: 4
(XEN) RIP: 0010:[<fffffa8002252185>]
(XEN) RFLAGS: 0000000000000082 CONTEXT: hvm guest
(XEN) rax: 000000000000000c rbx: 00000000000033c0 rcx: 0000000000000180
(XEN) rdx: 0000000000000007 rsi: fffff88002fe3b80 rdi: 0000000000000007
(XEN) rbp: 0000000000000001 rsp: fffff88002fe3b30 r8: fffff88002fe3b80
(XEN) r9: 00000000ffffffff r10: 0000000000000000 r11: fffff88002fe3b0e
(XEN) r12: 0000000000000004 r13: fffffa800222d200 r14: fffff880010d7730
(XEN) r15: 0000000000000001 cr0: 0000000080050031 cr4: 00000000000006f8
(XEN) cr3: 0000000000187000 cr2: 000000007724f4df
(XEN) ds: 002b es: 002b fs: 0053 gs: 002b ss: 0000 cs: 0010
(XEN) Xen WARN at p2m.c:200
(XEN) ----[ Xen-4.2-unstable x86_64 debug=y Not tainted ]----
(XEN) CPU: 4
(XEN) RIP: e008:[<ffff82c4801e08e2>] __get_gfn_type_access+0x230/0x3ee
(XEN) RFLAGS: 0000000000010202 CONTEXT: hypervisor
(XEN) rax: 0000000000000001 rbx: 000000000000000b rcx: ffff83083ffaff18
(XEN) rdx: 00000043bfcac680 rsi: 0000000000000000 rdi: 000000183f775000
(XEN) rbp: ffff83083ffaf9a8 rsp: ffff83083ffaf928 r8: 0000000000000001
(XEN) r9: 0000000000000000 r10: 0000000000000002 r11: 0000000000000002
(XEN) r12: ffff83183f7a1bb0 r13: ffff83083ffaf9dc r14: 0000000000000000
(XEN) r15: 00000000000033c0 cr0: 000000008005003b cr4: 00000000000026f0
(XEN) cr3: 000000183f793000 cr2: 000000007724f4df
(XEN) ds: 0000 es: 0000 fs: 0000 gs: 0000 ss: 0000 cs: e008
(XEN) Xen stack trace from rsp=ffff83083ffaf928:
(XEN)    ffff83083ffaff18 ffff83083ffaff18 0000000000000000 000000018012635a
(XEN)    ffff83083ffaf9d8 ffff83083ffaff18 ffff82c48030a760 ffff82c48030a760
(XEN)    ffff82c48030a760 ffff82c48030a760 01ff82c4801264b3 0000000000000000
(XEN)    ffff83183f7a3000 ffff83083ffafa28 0000000000000000 000000000183f7b9
(XEN)    ffff83083ffafa08 ffff82c480170b91 ffff830000000001 ffff83183f7a3010
(XEN)    0000000000000000 ffff82c4801b3d94 0000000b00000007 ffff83183f7a3000
(XEN)    fffff88002fe3b80 fffff88002fe3b80 ffff8300bf2ea000 ffff82c4801b5344
(XEN)    ffff83083ffafa88 ffff82c480170e4b 8b13687800000000 7d4d94af0000009f
(XEN)    0000000100007ff0 0000000000000000 00000000000033c0 ffff82c48015a3ff
(XEN)    0000000000000202 ffff83183f7a3000 ffff82c4801265c1 0000000000000008
(XEN)    0000000000000007 fffff88002fe3b80 ffff8300bf2ea000 ffff82c4801b5344
(XEN)    ffff83083ffafc78 ffff82c480114c72 0000000000000286 ffff83083ffafab8
(XEN)    ffff82c4801265c1 ffff83183f6a19a0 ffff83083ffafae8 ffff82c48012daa1
(XEN)    ffff83083ffafb18 0000000000000040 ffff83183f6a1990 ffff83183f7a3000
(XEN)    ffff83083ffafb18 ffff82c48012dbc5 0000000000000040 ffff83083ffeffd0
(XEN)    ffff83183f7a3000 ffff82c4803099c0 ffff83083ffafb68 ffff82c480105e63
(XEN)    0000000000000003 0000000000000000 ffff83183f6a19d0 ffff83083ffb6048
(XEN)    ffff83183f7a1bb0 0000000000000001 0000000000000001 0000000000000001
(XEN)    ffff83083ffafbf8 ffff82c4801e6f0d 00000000000000ff 0000000300000000
(XEN)    ffff83083ffafbc8 ffff83083ffafbc0 0000000000000000 ffff83083ffafcd8
(XEN) Xen call trace:
(XEN)    [<ffff82c4801e08e2>] __get_gfn_type_access+0x230/0x3ee
(XEN)    [<ffff82c480170b91>] xenmem_add_to_physmap_once+0x610/0x773
(XEN)    [<ffff82c480170e4b>] arch_memory_op+0x157/0xc7a
(XEN)    [<ffff82c480114c72>] do_memory_op+0x1d60/0x1dce
(XEN)    [<ffff82c4801b5409>] hvm_memory_op+0x62/0x6a
(XEN)    [<ffff82c4801b84f1>] hvm_do_hypercall+0x1af/0x2f5
(XEN)    [<ffff82c4801d73a5>] vmx_vmexit_handler+0xef3/0x1849
(XEN)
(XEN) domain_crash called from p2m.c:200
(XEN) Xen WARN at p2m.c:200
(XEN) ----[ Xen-4.2-unstable x86_64 debug=y Not tainted ]----
(XEN) CPU: 4
(XEN) RIP: e008:[<ffff82c4801e08e2>] __get_gfn_type_access+0x230/0x3ee
(XEN) RFLAGS: 0000000000010202 CONTEXT: hypervisor
(XEN) rax: 0000000000000001 rbx: 000000000000000b rcx: ffff83083ffaff18
(XEN) rdx: 00000043bfcac680 rsi: 0000000000000000 rdi: 000000183f775000
(XEN) rbp: ffff83083ffaf9a8 rsp: ffff83083ffaf928 r8: 0000000000000001
(XEN) r9: 0000000000000000 r10: 0000000000000003 r11: 0000000000000003
(XEN) r12: ffff83183f7a1bb0 r13: ffff83083ffaf9dc r14: 0000000000000000
(XEN) r15: 00000000000033c0 cr0: 000000008005003b cr4: 00000000000026f0
(XEN) cr3: 000000183f793000 cr2: 000000007724f4df
(XEN) ds: 0000 es: 0000 fs: 0000 gs: 0000 ss: 0000 cs: e008
(XEN) Xen stack trace from rsp=ffff83083ffaf928:
(XEN)    ffff83083ffaff18 ffff83083ffaff18 0000000000000000 000000018012635a
(XEN)    ffff83083ffaf9d8 ffff83083ffaff18 ffff82c48030a760 ffff82c48030a760
(XEN)    ffff82c48030a760 ffff82c48030a760 01ff82c4801264b3 0000000000000000
(XEN)    ffff83183f7a3000 ffff83083ffafa28 0000000000000000 000000000183f7b9
(XEN)    ffff83083ffafa08 ffff82c480170b91 ffff830000000001 ffff83183f7a3010
(XEN)    0000000000000000 ffff82c4801b3d94 0000000b00000007 ffff83183f7a3000
(XEN)    fffff88002fe3b80 fffff88002fe3b80 ffff8300bf2ea000 ffff82c4801b5344
(XEN)    ffff83083ffafa88 ffff82c480170e4b 8b13687800000000 7d4d94af0000009f
(XEN)    0000000100007ff0 0000000000000000 00000000000033c0 ffff82c48015a3ff
(XEN)    0000000000000202 ffff83183f7a3000 ffff82c4801265c1 0000000000000008
(XEN)    0000000000000007 fffff88002fe3b80 ffff8300bf2ea000 ffff82c4801b5344
(XEN)    ffff83083ffafc78 ffff82c480114c72 0000000000000286 ffff83083ffafab8
(XEN)    ffff82c4801265c1 ffff83183f6a19a0 ffff83083ffafae8 ffff82c48012daa1
(XEN)    ffff83083ffafb18 0000000000000040 ffff83183f6a1990 ffff83183f7a3000
(XEN)    ffff83083ffafb18 ffff82c48012dbc5 0000000000000040 ffff83083ffeffd0
(XEN)    ffff83183f7a3000 ffff82c4803099c0 ffff83083ffafb68 ffff82c480105e63
(XEN)    0000000000000003 0000000000000000 ffff83183f6a19d0 ffff83083ffb6048
(XEN)    ffff83183f7a1bb0 0000000000000001 0000000000000001 0000000000000001
(XEN)    ffff83083ffafbf8 ffff82c4801e6f0d 00000000000000ff 0000000300000000
(XEN)    ffff83083ffafbc8 ffff83083ffafbc0 0000000000000000 ffff83083ffafcd8
(XEN) Xen call trace:
(XEN)    [<ffff82c4801e08e2>] __get_gfn_type_access+0x230/0x3ee
(XEN)    [<ffff82c480170b91>] xenmem_add_to_physmap_once+0x610/0x773
(XEN)    [<ffff82c480170e4b>] arch_memory_op+0x157/0xc7a
(XEN)    [<ffff82c480114c72>] do_memory_op+0x1d60/0x1dce
(XEN)    [<ffff82c4801b5409>] hvm_memory_op+0x62/0x6a
(XEN)    [<ffff82c4801b84f1>] hvm_do_hypercall+0x1af/0x2f5
(XEN)    [<ffff82c4801d73a5>] vmx_vmexit_handler+0xef3/0x1849
(XEN)
(XEN) domain_crash called from p2m.c:200

>>
>>
>> # HG changeset patch
>> # Parent c3738598897f5239a72cabde676f5e86fd4c8241
>>
>> diff -r c3738598897f xen/arch/x86/hvm/hvm.c
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -999,6 +999,10 @@ int hvm_vcpu_initialise(struct vcpu *v)
>>
>>      v->arch.hvm_vcpu.inject_trap = -1;
>>
>> +#ifdef CONFIG_X86_64
>> +    init_waitqueue_head(&v->arch.hvm_vcpu.mem_paging_wq);
>> +#endif
>> +
>>  #ifdef CONFIG_COMPAT
>>      rc = setup_compat_arg_xlat(v);
>>      if ( rc != 0 )
>> diff -r c3738598897f xen/arch/x86/mm/p2m.c
>> --- a/xen/arch/x86/mm/p2m.c
>> +++ b/xen/arch/x86/mm/p2m.c
>> @@ -183,8 +183,8 @@ again:
>>          p2m_mem_paging_populate(p2m->domain, gfn);
>>
>>          /* Wait until the pager finishes paging it in */
>> -        current->arch.mem_paging_gfn = gfn;
>> -        wait_event(current->arch.mem_paging_wq, ({
>> +        current->arch.hvm_vcpu.mem_paging_gfn = gfn;
>> +        wait_event(current->arch.hvm_vcpu.mem_paging_wq, ({
>>              int done;
>>              mfn = p2m->get_entry(p2m, gfn, t, a, 0, page_order);
>>              done = (*t != p2m_ram_paging_in);
>> @@ -1190,8 +1190,8 @@ void p2m_mem_paging_resume(struct domain
>>          }
>>          /* Wake any vcpus that were waiting for this GFN */
>>          for_each_vcpu ( d, v )
>> -            if ( v->arch.mem_paging_gfn == rsp.gfn )
>> -                wake_up_all(&v->arch.mem_paging_wq);
>> +            if ( v->arch.hvm_vcpu.mem_paging_gfn == rsp.gfn )
>> +                wake_up_all(&v->arch.hvm_vcpu.mem_paging_wq);
>>      }
>>  }
>>
>> diff -r c3738598897f xen/include/asm-x86/domain.h
>> --- a/xen/include/asm-x86/domain.h
>> +++ b/xen/include/asm-x86/domain.h
>> @@ -494,12 +494,6 @@ struct arch_vcpu
>>
>>      struct paging_vcpu paging;
>>
>> -#ifdef CONFIG_X86_64
>> -    /* Mem-paging: this vcpu is waiting for a gfn to be paged in */
>> -    struct waitqueue_head mem_paging_wq;
>> -    unsigned long mem_paging_gfn;
>> -#endif
>> -
>>  #ifdef CONFIG_X86_32
>>      /* map_domain_page() mapping cache. */
>>      struct mapcache_vcpu mapcache;
>> diff -r c3738598897f xen/include/asm-x86/hvm/vcpu.h
>> --- a/xen/include/asm-x86/hvm/vcpu.h
>> +++ b/xen/include/asm-x86/hvm/vcpu.h
>> @@ -170,6 +170,13 @@ struct hvm_vcpu {
>>      unsigned long inject_cr2;
>>
>>      struct viridian_vcpu viridian;
>> +
>> +#ifdef CONFIG_X86_64
>> +    /* Mem-paging: this vcpu is waiting for a gfn to be paged in */
>> +    struct waitqueue_head mem_paging_wq;
>> +    unsigned long mem_paging_gfn;
>> +#endif
>> +
>>  };
>>
>>  #endif /* __ASM_X86_HVM_VCPU_H__ */
>
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
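For anyone following the thread without Tim's series at hand: the pattern the patch above relies on is that a faulting vcpu records the gfn it needs, sleeps on its per-vcpu wait queue until the pager finishes, and re-checks the p2m entry after every wakeup, while p2m_mem_paging_resume() wakes only the vcpus whose recorded gfn matches the one just paged in. Below is a minimal, self-contained user-space sketch of that pattern using pthreads. It is not Xen code and does not use Xen's wait-queue API; every name in it (vcpu_thread, pager_thread, page_type and so on) is made up for illustration.

/*
 * Standalone pthreads sketch (not Xen code) of the wait-queue pattern the
 * patch relies on: the "vcpu" records the gfn it needs, sleeps until the
 * "pager" marks the page present, and the pager wakes matching waiters.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum p2m_type { p2m_ram_rw, p2m_ram_paging_in };

static enum p2m_type page_type = p2m_ram_paging_in;   /* stand-in for the p2m entry */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t mem_paging_wq = PTHREAD_COND_INITIALIZER; /* per-vcpu wq in the patch */
static unsigned long mem_paging_gfn;                  /* gfn the vcpu is waiting for */

/* Vcpu side: analogous to the wait_event() loop in __get_gfn_type_access(). */
static void *vcpu_thread(void *arg)
{
    unsigned long gfn = (unsigned long)arg;

    pthread_mutex_lock(&lock);
    mem_paging_gfn = gfn;                     /* current->arch.hvm_vcpu.mem_paging_gfn = gfn; */
    while ( page_type == p2m_ram_paging_in )  /* condition re-checked after every wakeup */
        pthread_cond_wait(&mem_paging_wq, &lock);
    pthread_mutex_unlock(&lock);

    printf("vcpu: gfn %#lx paged in, retrying the access\n", gfn);
    return NULL;
}

/* Pager side: analogous to p2m_mem_paging_resume() waking matching vcpus. */
static void *pager_thread(void *arg)
{
    unsigned long resumed_gfn = (unsigned long)arg;

    sleep(1);                                 /* pretend the page-in takes a while */
    pthread_mutex_lock(&lock);
    page_type = p2m_ram_rw;                   /* the p2m entry is usable again */
    if ( mem_paging_gfn == resumed_gfn )      /* wake only vcpus waiting for this gfn */
        pthread_cond_broadcast(&mem_paging_wq);  /* wake_up_all() */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t vcpu, pager;
    unsigned long gfn = 0x33c0;               /* arbitrary example gfn */

    pthread_create(&vcpu, NULL, vcpu_thread, (void *)gfn);
    pthread_create(&pager, NULL, pager_thread, (void *)gfn);
    pthread_join(vcpu, NULL);
    pthread_join(pager, NULL);
    return 0;
}

The detail the sketch shares with the real wait_event() condition is that the waiter re-evaluates the p2m state after every wakeup instead of trusting the wakeup itself: wake_up_all() only means "something changed for this gfn", and the waiter must see the type move away from p2m_ram_paging_in before it proceeds.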