[Xen-devel] [PATCH 05/14] arm: implement exception and hypercall entries.
arm: implement exception and hypercall entries.

 xen/arch/arm/xen/Makefile      |    3 +
 xen/arch/arm/xen/asm-offsets.c |   61 ++++++++
 xen/arch/arm/xen/entry.S       |  596 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/xen/hypercalls.S  |   67 +++++++++
 xen/arch/arm/xen/physdev.c     |   41 +++++
 5 files changed, 768 insertions(+), 0 deletions(-)

Signed-off-by: Jaemin Ryu <jm77.ryu@xxxxxxxxxxx>

diff -r 28a6038da99f xen/arch/arm/xen/Makefile
--- a/xen/arch/arm/xen/Makefile	Fri Feb 03 17:28:34 2012 +0900
+++ b/xen/arch/arm/xen/Makefile	Fri Feb 03 17:47:16 2012 +0900
@@ -1,5 +1,8 @@
 obj-y += start.o
 obj-y += setup.o
+obj-y += entry.o
+obj-y += hypercalls.o
+obj-y += physdev.o
 obj-y += mm.o
 obj-y += irq.o
 obj-y += arch_domain.o
diff -r 28a6038da99f xen/arch/arm/xen/asm-offsets.c
--- a/xen/arch/arm/xen/asm-offsets.c	Fri Feb 03 17:28:34 2012 +0900
+++ b/xen/arch/arm/xen/asm-offsets.c	Fri Feb 03 17:47:16 2012 +0900
@@ -34,6 +34,67 @@
 int main(void)
 {
+	DEFINE(OFFSET_SOFTIRQ_PENDING, offsetof(struct irq_cpustat, __softirq_pending));
+	DEFINE(OFFSET_LOCAL_IRQ_COUNT, offsetof(struct irq_cpustat, __local_irq_count));
+	DEFINE(OFFSET_NMI_COUNT, offsetof(struct irq_cpustat, __nmi_count));
+	DEFINE(SIZE_IRQ_CPU_STAT, sizeof(struct irq_cpustat));
+	BLANK();
+	DEFINE(OFFSET_VCPU_INFO, offsetof(struct vcpu, vcpu_info));
+	DEFINE(OFFSET_ARCH_VCPU, offsetof(struct vcpu, arch));
+	BLANK();
+	DEFINE(OFFSET_EVTCHN_UPCALL_MASK, offsetof(struct vcpu_info, evtchn_upcall_mask));
+	DEFINE(OFFSET_EVTCHN_UPCALL_PENDING, offsetof(struct vcpu_info, evtchn_upcall_pending));
+	DEFINE(OFFSET_ARCH_VCPU_INFO, offsetof(struct vcpu_info, arch));
+	DEFINE(OFFSET_TSP, offsetof(struct arch_vcpu_info, sp));
+	DEFINE(OFFSET_TLR, offsetof(struct arch_vcpu_info, lr));
+	DEFINE(OFFSET_TCPSR, offsetof(struct arch_vcpu_info, cpsr));
+	DEFINE(OFFSET_TSPSR, offsetof(struct arch_vcpu_info, spsr));
+	DEFINE(OFFSET_VCR, offsetof(struct arch_vcpu_info, cr));
+	DEFINE(OFFSET_VDACR, offsetof(struct arch_vcpu_info, dacr));
+	DEFINE(OFFSET_VCPAR, offsetof(struct arch_vcpu_info, cpar));
+	DEFINE(OFFSET_VPIDR, offsetof(struct arch_vcpu_info, pidr));
+	DEFINE(OFFSET_VFSR, offsetof(struct arch_vcpu_info, fsr));
+	DEFINE(OFFSET_VFAR, offsetof(struct arch_vcpu_info, far));
+	BLANK();
+	DEFINE(OFFSET_GUEST_CONTEXT, offsetof(struct arch_vcpu, ctx));
+	DEFINE(OFFSET_VECTOR_RESET, 0);
+	DEFINE(OFFSET_VECTOR_UND, 4);
+	DEFINE(OFFSET_VECTOR_SWI, 8);
+	DEFINE(OFFSET_VECTOR_PABT, 12);
+	DEFINE(OFFSET_VECTOR_DABT, 16);
+	DEFINE(OFFSET_VECTOR_IRQ, 24);
+	DEFINE(OFFSET_VECTOR_FIQ, 28);
+	BLANK();
+	DEFINE(OFFSET_VCPU, offsetof(struct cpu_info, vcpu));
+	DEFINE(OFFSET_VPSR, offsetof(struct cpu_info, vspsr));
+	DEFINE(OFFSET_VSP, offsetof(struct cpu_info, vsp));
+	DEFINE(OFFSET_VLR, offsetof(struct cpu_info, vlr));
+	BLANK();
+	DEFINE(OFFSET_VCPU_R0, offsetof(struct vcpu_guest_context, r0));
+	DEFINE(OFFSET_VCPU_R1, offsetof(struct vcpu_guest_context, r1));
+	DEFINE(OFFSET_VCPU_R2, offsetof(struct vcpu_guest_context, r2));
+	DEFINE(OFFSET_VCPU_R3, offsetof(struct vcpu_guest_context, r3));
+	DEFINE(OFFSET_VCPU_R4, offsetof(struct vcpu_guest_context, r4));
+	DEFINE(OFFSET_VCPU_R5, offsetof(struct vcpu_guest_context, r5));
+	DEFINE(OFFSET_VCPU_R6, offsetof(struct vcpu_guest_context, r6));
+	DEFINE(OFFSET_VCPU_R7, offsetof(struct vcpu_guest_context, r7));
+	DEFINE(OFFSET_VCPU_R8, offsetof(struct vcpu_guest_context, r8));
+	DEFINE(OFFSET_VCPU_R9, offsetof(struct vcpu_guest_context, r9));
+	DEFINE(OFFSET_VCPU_R10, offsetof(struct vcpu_guest_context, r10));
+	DEFINE(OFFSET_VCPU_R11, offsetof(struct vcpu_guest_context, r11));
+	DEFINE(OFFSET_VCPU_R12, offsetof(struct vcpu_guest_context, r12));
+	DEFINE(OFFSET_VCPU_R13, offsetof(struct vcpu_guest_context, r13));
+	DEFINE(OFFSET_VCPU_R14, offsetof(struct vcpu_guest_context, r14));
+	DEFINE(OFFSET_VCPU_R15, offsetof(struct vcpu_guest_context, r15));
+	DEFINE(OFFSET_VCPU_DACR, offsetof(struct vcpu_guest_context, dacr));
+	DEFINE(OFFSET_VCPU_VBAR, offsetof(struct vcpu_guest_context, vbar));
+	DEFINE(OFFSET_VCPU_CONTEXTIDR, offsetof(struct vcpu_guest_context, contextidr));
+	DEFINE(OFFSET_VCPU_FCSEIDR, offsetof(struct vcpu_guest_context, fcseidr));
+	DEFINE(OFFSET_VCPU_TTBR0, offsetof(struct vcpu_guest_context, ttbr0));
+	DEFINE(OFFSET_VCPU_TTBR1, offsetof(struct vcpu_guest_context, ttbr1));
+	DEFINE(OFFSET_VCPU_TTBCR, offsetof(struct vcpu_guest_context, ttbcr));
+	//DEFINE(OFFSET_HYPERVISOR_CALLBACK, offsetof(struct vcpu_guest_context, event_callback));
+	//DEFINE(OFFSET_FAILSAFE_CALLBACK, offsetof(struct vcpu_guest_context, failsafe_callback));
 	BLANK();
 	return 0;
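For reference, the offsetof() expressions above imply roughly the following layouts for the per-CPU bookkeeping block and the register area shared with the guest. The real definitions live in the arch headers and public/arch-arm.h rather than in this patch, so the sketch below is an inference from how entry.S uses the offsets (including its hard-coded [r8, #4]/[r8, #8]/[r8, #12] accesses), not an authoritative definition:

/* Inferred sketch only -- not part of this patch. */
struct cpu_info {                   /* per-CPU block reached via the cci macro */
	struct vcpu   *vcpu;        /* +0  OFFSET_VCPU                                    */
	unsigned long  vspsr;       /* +4  OFFSET_VPSR: mode the guest believes it is in  */
	unsigned long  vsp;         /* +8  OFFSET_VSP:  guest's virtual SVC stack pointer */
	unsigned long  vlr;         /* +12 OFFSET_VLR:  guest's virtual SVC link register */
	/* entry.S also stores to [r8, #16], so further fields follow here. */
};

struct arch_vcpu_info {             /* per-vCPU state shared with the guest */
	unsigned long sp, lr;               /* OFFSET_TSP/TLR:     trap-time sp/lr          */
	unsigned long cpsr, spsr;           /* OFFSET_TCPSR/TSPSR: virtualised PSR state    */
	unsigned long cr, dacr, cpar, pidr; /* OFFSET_VCR/VDACR/VCPAR/VPIDR                 */
	unsigned long fsr, far;             /* OFFSET_VFSR/VFAR:   last fault status/address */
};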
diff -r 28a6038da99f xen/arch/arm/xen/entry.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/entry.S	Fri Feb 03 17:47:16 2012 +0900
@@ -0,0 +1,596 @@
+/*
+ * entry.S
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *	Sang-bum Suh <sbuk.suh@xxxxxxxxxxx>
+ *	JaeMin Ryu <jm77.ryu@xxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public version 2 of License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm/asm-macros.h>
+#include <asm/cpu-domain.h>
+#include <asm/asm-offsets.h>
+#include <public/arch-arm.h>
+
+.macro SAVE_CONTEXT offset correction
+	sub	lr, lr, #\correction
+	str	r0, [sp, #-16]
+	str	lr, [sp, #-12]
+
+	mrs	r0, spsr
+	mov	lr, #\offset
+	str	r0, [sp, #-8]
+	str	lr, [sp, #-4]
+
+	sub	r0, sp, #16
+
+	msr	cpsr_cxsf, #(PSR_I_BIT | PSR_F_BIT | PSR_MODE_SVC)
+
+	sub	sp, sp, #CTXT_FRAME_SIZE
+SPFIX(	tst	sp, #4 )
+SPFIX(	bicne	sp, sp, #4 )
+	stmib	sp, {r1 - lr}^
+	ldmia	r0, {r1 - r4}
+	add	r5, sp, #CTXT_SSP
+	add	r0, sp, #CTXT_FRAME_SIZE
+SPFIX(	addne	r0, r0, #4 )
+	str	r1, [sp]
+	mov	r1, lr
+	stmia	r5, {r0 - r4}
+	msr	spsr_cxsf, r3
+.endm
+
+.macro RESTORE_CONTEXT
+	ldr	r0, [sp, #CTXT_SPSR]
+	msr	spsr_cxsf, r0
+	ldmia	sp, {r0 - lr}^
+	add	sp, sp, #CTXT_SSP
+	ldmia	sp, {sp, lr, pc}^
+.endm
+
+	.align	5
+	.global	exception_vector_table
+exception_vector_table:
+	ldr	pc, .rst
+	ldr	pc, .und
+	ldr	pc, .swi
+	ldr	pc, .pabt
+	ldr	pc, .dabt
+	ldr	pc, .adx
+	ldr	pc, .irq
+	ldr	pc, .fiq
+
+.rst:	.long	vector_reset
+.und:	.long	vector_und
+.swi:	.long	vector_swi
+.pabt:	.long	vector_pabt
+.dabt:	.long	vector_dabt
+.adx:	.long	vector_reserved
+.irq:	.long	vector_irq
+.fiq:	.long	vector_fiq
+
+	.align	5
+vector_reset:
+1:
+	b	1b
+
+	.align	5
+vector_irq:
+	SAVE_CONTEXT	0x18, 4
+
+	mrs	r0, spsr
+	and	r0, r0, #PSR_MODE_MASK
+	eors	r0, r0, #PSR_MODE_SVC
+
+	bne	return_to_guest
+
+	cpsid	i
+
+	RESTORE_CONTEXT
+
+	.align	5
+vector_dabt:
+	str	r0, [sp, #-16]
+	str	lr, [sp, #-12]
+	mrs	r0, spsr
+	str	r0, [sp, #-8]
+	sub	r0, sp, #16
+
+	msr	cpsr_cxsf, #(PSR_I_BIT | PSR_F_BIT | PSR_MODE_SVC)
+
+	sub	sp, sp, #CTXT_FRAME_SIZE
+SPFIX(	tst	sp, #4 )
+SPFIX(	bicne	sp, sp, #4 )
+	stmib	sp, {r1 - lr}^
+	ldmia	r0, {r1 - r3}
+	add	r5, sp, #CTXT_SSP
+	add	r0, sp, #CTXT_FRAME_SIZE
+SPFIX(	addne	r0, r0, #4 )
+	str	r1, [sp]
+	mov	r1, lr
+	stmia	r5, {r0 - r3}
+
+	mrc	p15, 0, r0, c6, c0, 0
+	mrc	p15, 0, r1, c5, c0, 0
+
+	and	r4, r3, #PSR_MODE_MASK
+	eors	r4, r4, #PSR_MODE_SVC
+
+	beq	do_data_abort
+
+	cpsie	i
+
+	cci	r8
+	ldr	r9, [r8]
+
+	ldr	r10, [r9, #OFFSET_VCPU_INFO]
+	ldr	r14, [r9, #(OFFSET_ARCH_VCPU + OFFSET_GUEST_CONTEXT + OFFSET_VCPU_VBAR)]
+	cmp	r14, #0
+	beq	trap_table_invalid
+
+	add	r14, r14, #OFFSET_VECTOR_DABT
+
+	str	r0, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_VFAR)]
+	str	r1, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_VFSR)]
+
+	@ Following is added to mix evtchn upcall mask and psr
+	ldr	r4, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	orr	r9, r4, #VPSR_I_BIT
+	str	r9, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	ldr	r0, [sp, #CTXT_USP]
+	ldr	r1, [sp, #CTXT_ULR]
+
+	ldr	r5, [r8, #OFFSET_VPSR]
+	bic	r3, r3, #PSR_MODE_MASK
+	orr	r3, r3, r5
+
+	tst	r4, #VPSR_I_BIT
+	orrne	r3, r3, #PSR_I_BIT
+
+	str	r0, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSP)]
+	str	r1, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TLR)]
+	str	r3, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSPSR)]
+
+	cmp	r5, #PSR_MODE_SVC
+	ldrne	r0, [r8, #8]
+
+	mov	r5, #PSR_MODE_SVC
+	str	r5, [r8, #4]
+	str	r0, [r8, #8]
+	str	r2, [r8, #12]
+
+	ldr	r5, =DACR_STAT_SVC
+	mcr	p15, 0, r5, c3, c0, 0
+
+	cpsid	i
+
+	add	r8, r8, #8
+	ldmia	r8, {r13, r14}^
+	ldmia	sp, {r0-r12}
+	ldr	sp, [sp, #CTXT_SSP]
+	msr	spsr, #PSR_MODE_USR
+	movs	pc, lr
+
+	.align	5
+vector_pabt:
+	str	r0, [sp, #-16]
+	str	lr, [sp, #-12]
+	mrs	r0, spsr
+	str	r0, [sp, #-8]
+	sub	r0, sp, #16
+
+	msr	cpsr_cxsf, #(PSR_I_BIT | PSR_F_BIT | PSR_MODE_SVC)
+
+	sub	sp, sp, #CTXT_FRAME_SIZE
+SPFIX(	tst	sp, #4 )
+SPFIX(	bicne	sp, sp, #4 )
+	stmib	sp, {r1 - lr}^
+	ldmia	r0, {r1 - r3}
+	add	r5, sp, #CTXT_SSP
+	add	r0, sp, #CTXT_FRAME_SIZE
+SPFIX(	addne	r0, r0, #4 )
+	str	r1, [sp]
+	mov	r1, lr
+	stmia	r5, {r0 - r3}
+
+	mrc	p15, 0, r0, c6, c0, 0
+	mrc	p15, 0, r1, c5, c0, 0
+
+	and	r4, r3, #PSR_MODE_MASK
+	eors	r4, r4, #PSR_MODE_SVC
+
+	beq	do_prefetch_abort
+
+	cpsie	i
+
+	cci	r8
+	ldr	r9, [r8]
+
+	ldr	r10, [r9, #OFFSET_VCPU_INFO]
+	ldr	r14, [r9, #(OFFSET_ARCH_VCPU + OFFSET_GUEST_CONTEXT + OFFSET_VCPU_VBAR)]
+	cmp	lr, #0
+	beq	trap_table_invalid
+
+	add	r14, r14, #OFFSET_VECTOR_PABT
+
+	ldr	r4, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	orr	r9, r4, #VPSR_I_BIT
+	str	r9, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	ldr	r0, [sp, #CTXT_USP]
+	ldr	r1, [sp, #CTXT_ULR]
+
+	ldr	r5, [r8, #4]
+	bic	r3, r3, #PSR_MODE_MASK
+	orr	r3, r3, r5
+
+	tst	r4, #VPSR_I_BIT
+	orrne	r3, r3, #PSR_I_BIT
+
+	str	r0, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSP)]
+	str	r1, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TLR)]
+	str	r3, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSPSR)]
+
+	cmp	r5, #PSR_MODE_SVC
+	ldrne	r0, [r8, #8]
+
+	mov	r5, #PSR_MODE_SVC
+	str	r5, [r8, #4]
+	str	r0, [r8, #8]
+	str	r2, [r8, #12]
+
+	ldr	r5, =DACR_STAT_SVC
+	mcr	p15, 0, r5, c3, c0, 0
+
+	cpsid	i
+
+	add	r8, r8, #8
+	ldmia	r8, {r13, r14}^
+	ldmia	sp, {r0-r12}
+	ldr	sp, [sp, #CTXT_SSP]
+	msr	spsr, #PSR_MODE_USR
+	movs	pc, lr
+
+	.align	5
+vector_und:
+	str	r0, [sp, #-16]
+	str	lr, [sp, #-12]
+	mrs	r0, spsr
+	str	r0, [sp, #-8]
+	sub	r0, sp, #16
+
+	msr	cpsr_cxsf, #(PSR_I_BIT | PSR_F_BIT | PSR_MODE_SVC)
+
+	sub	sp, sp, #CTXT_FRAME_SIZE
+SPFIX(	tst	sp, #4 )
+SPFIX(	bicne	sp, sp, #4 )
+	stmib	sp, {r1 - lr}^
+	ldmia	r0, {r1 - r3}
+	add	r5, sp, #CTXT_SSP
+	add	r0, sp, #CTXT_FRAME_SIZE
+SPFIX(	addne	r0, r0, #4 )
+	str	r1, [sp]
+	mov	r1, lr
+	stmia	r5, {r0 - r3}
+
+	and	r4, r3, #PSR_MODE_MASK
+	eors	r4, r4, #PSR_MODE_SVC
+
+	beq	do_undefined_instruction
+
+	cpsie	i
+
+	cci	r8
+	ldr	r9, [r8]
+
+	ldr	r10, [r9, #OFFSET_VCPU_INFO]
+	ldr	r14, [r9, #(OFFSET_ARCH_VCPU + OFFSET_GUEST_CONTEXT + OFFSET_VCPU_VBAR)]
+	cmp	lr, #0
+	beq	trap_table_invalid
+
+	add	r14, r14, #OFFSET_VECTOR_UND
+
+	ldr	r4, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	orr	r9, r4, #VPSR_I_BIT
+	str	r9, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	ldr	r0, [sp, #CTXT_USP]
+	ldr	r1, [sp, #CTXT_ULR]
+
+	ldr	r5, [r8, #4]
+	bic	r3, r3, #PSR_MODE_MASK
+	orr	r3, r3, r5
+
+	tst	r4, #VPSR_I_BIT
+	orrne	r3, r3, #PSR_I_BIT
+
+	str	r0, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSP)]
+	str	r1, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TLR)]
+	str	r3, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSPSR)]
+
+	cmp	r5, #PSR_MODE_SVC
+	ldrne	r0, [r8, #8]
+
+	mov	r5, #PSR_MODE_SVC
+	str	r5, [r8, #4]
+	str	r0, [r8, #8]
+	str	r2, [r8, #12]
+	str	r1, [r8, #16]
+
+	ldr	r5, =DACR_STAT_SVC
+	mcr	p15, 0, r5, c3, c0, 0
+
+	cpsid	i
+
+	add	r8, r8, #8
+	ldmia	r8, {r13, r14}^
+	ldmia	sp, {r0-r12}
+	ldr	sp, [sp, #CTXT_SSP]
+	msr	spsr, #PSR_MODE_USR
+	movs	pc, lr
+
+	.align	5
+vector_fiq:
+	subs	pc, lr, #4
+
+	.align	5
+vector_reserved:
+	b	vector_reserved
+
+	.align	5
+trap_table_invalid:
+	b	trap_table_invalid
+
+	.align	5
+vector_swi:
+	str	sp, [sp, #(CTXT_SSP - CTXT_FRAME_SIZE)]
+	sub	sp, sp, #CTXT_FRAME_SIZE
+	stmia	sp, {r0 - lr}^
+	mrs	r11, spsr
+	str	r14, [sp, #CTXT_PC]
+	str	r11, [sp, #CTXT_SPSR]
+
+	cpsie	i
+
+	cci	r8
+	ldr	r12, [r8, #4]
+	eors	r12, r12, #PSR_MODE_SVC
+
+	beq	invoke_hypercall
+
+	mov	r12, #PSR_MODE_SVC
+	str	r12, [r8, #4]
+	str	r14, [r8, #12]
+
+	ldr	r9, [r8]
+	ldr	r10, [r9, #OFFSET_VCPU_INFO]
+	ldr	r14, [r9, #(OFFSET_ARCH_VCPU + OFFSET_GUEST_CONTEXT + OFFSET_VCPU_VBAR)]
+	cmp	r14, #0
+	beq	trap_table_invalid
+
+	add	r14, r14, #OFFSET_VECTOR_SWI
+
+	ldr	r4, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	orr	r9, r4, #VPSR_I_BIT
+	str	r9, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	tst	r4, #VPSR_I_BIT
+	orrne	r11, r11, #PSR_I_BIT
+
+	ldr	r4, [sp, #CTXT_USP]
+	ldr	r5, [sp, #CTXT_ULR]
+
+	str	r4, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSP)]
+	str	r5, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TLR)]
+	str	r11, [r10, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSPSR)]
+
+	ldr	r11, =DACR_STAT_SVC
+	mcr	p15, 0, r11, c3, c0, 0
+
+	cpsid	i
+
+	add	r8, r8, #8
+	ldmia	r8, {r13, r14}^
+	ldmia	sp, {r0-r12}
+	ldr	sp, [sp, #CTXT_SSP]
+	msr	spsr, #PSR_MODE_USR
+	movs	pc, lr
+
+invoke_hypercall:
+	ldr	r12, [lr, #-4]
+	bic	r12, r12, #0xff000000
+
+	adr	r14, 1f
+	adr	r11, hypercall_table
+	ldr	pc, [r11, r12, lsl #2]
+
+1:
+	str	r0, [sp, #CTXT_R0]
+
+	b	return_to_guest
+
+ENTRY(return_to_guest)
+	cpsie	i
+	bl	do_softirq
+
+	cci	r8
+	ldr	r10, [r8, #OFFSET_VCPU]
+
+	ldr	r11, [r10, #OFFSET_VCPU_INFO]
+	ldr	r9, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	tst	r9, #VPSR_I_BIT
+	bne	resume_guest_domain
+
+	ldr	r12, [r11, #OFFSET_EVTCHN_UPCALL_PENDING]
+
+	tst	r12, #0xFF
+	beq	resume_guest_domain
+
+do_upcall:
+	ldr	r14, [r10, #(OFFSET_ARCH_VCPU + OFFSET_GUEST_CONTEXT + OFFSET_VCPU_VBAR)]
+	cmp	lr, #0
+	beq	trap_table_invalid
+
+	add	r14, r14, #OFFSET_VECTOR_IRQ
+
+	ldr	r4, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	orr	r9, r4, #VPSR_I_BIT
+	str	r9, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	ldr	r0, [sp, #CTXT_USP]
+	ldr	r1, [sp, #CTXT_ULR]
+	ldr	r2, [sp, #CTXT_PC]
+	ldr	r3, [sp, #CTXT_SPSR]
+
+	ldr	r5, [r8, #4]
+	bic	r3, r3, #PSR_MODE_MASK
+	orr	r3, r3, r5
+
+	tst	r4, #VPSR_I_BIT
+	orrne	r3, r3, #PSR_I_BIT
+
+	str	r0, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSP)]
+	str	r1, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TLR)]
+	str	r3, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSPSR)]
+
+	cmp	r5, #PSR_MODE_SVC
+	ldrne	r0, [r8, #8]
+
+	mov	r5, #PSR_MODE_SVC
+	str	r5, [r8, #4]
+	str	r0, [r8, #8]
+	str	r2, [r8, #12]
+
+	ldr	r5, =DACR_STAT_SVC
+	mcr	p15, 0, r5, c3, c0, 0
+
+	cpsid	i
+
+	add	r8, r8, #8
+	ldmia	r8, {r13, r14}^
+	ldmia	sp, {r0-r12}
+	ldr	sp, [sp, #CTXT_SSP]
+	msr	spsr, #PSR_MODE_USR
+	movs	pc, lr
+
+resume_guest_domain:
+	cci	r8
+	ldr	r3, [r8, #OFFSET_VPSR]
+	ldr	ip, [sp, #CTXT_SPSR]
+	cmp	r3, #PSR_MODE_SVC
+
+	ldrne	r7, =DACR_STAT_HYP
+	ldreq	r7, =DACR_STAT_SVC
+	mcr	p15, 0, r7, c3, c0, 0
+
+	cpsid	i
+
+	RESTORE_CONTEXT
+
+/*
+ * Prototype : __switch_to(struct vcpu *, struct vcpu_guest_context *, struct vcpu_guest_context *)
+ */
+	.align	5
+ENTRY(switch_to)
+	add	ip, r1, #OFFSET_VCPU_R4
+	stmia	ip, {r4 - sl, fp, ip, sp, lr}
+
+	mrc	p15, 0, r4, c3, c0, 0
+	mrc	p15, 0, r7, c13, c0, 1
+
+	str	r4, [r1, #OFFSET_VCPU_DACR]
+	str	r7, [r1, #OFFSET_VCPU_CONTEXTIDR]
+
+	ldr	r4, [r2, #OFFSET_VCPU_DACR]
+	ldr	r7, [r2, #OFFSET_VCPU_CONTEXTIDR]
+
+	mcr	p15, 0, r4, c3, c0, 0
+	mcr	p15, 0, r7, c13, c0, 1
+
+	add	ip, r2, #OFFSET_VCPU_R4
+	ldmia	ip, {r4 - sl, fp, ip, sp, lr}
+
+	b	context_saved
+
+	.align	5
+	.type	hypercall_table, #object
+ENTRY(hypercall_table)
+	.long	do_set_trap_table	/* 0 */
+	.long	do_mmu_update
+	.long	do_ni_hypercall		/* set_gdt */
+	.long	do_ni_hypercall		/* stack_switch */
+	.long	do_set_callbacks
+	.long	do_ni_hypercall		/* fpu_switch */
+	.long	do_sched_op_compat
+	.long	do_ni_hypercall
+	.long	do_ni_hypercall
+	.long	do_ni_hypercall
+	.long	do_ni_hypercall		/* 10 */
+	.long	do_ni_hypercall
+	.long	do_memory_op
+	.long	do_multicall
+	.long	do_update_va_mapping
+	.long	do_set_timer_op		/* 15 */
+	.long	do_event_channel_op
+	.long	do_xen_version
+	.long	do_console_io
+	.long	do_physdev_op
+	.long	do_grant_table_op	/* 20 */
+	.long	do_vm_assist
+	.long	do_ni_hypercall
+	.long	do_restore_trap_frame
+	.long	do_vcpu_op
+	.long	do_ni_hypercall		/* 25 */
+	.long	do_mmuext_op
+	.long	do_ni_hypercall
+	.long	do_nmi_op
+	.long	do_sched_op
+	.long	do_ni_hypercall		/* 30 : callbackop */
+	.long	do_ni_hypercall		/* xenoprof */
+	.long	do_ni_hypercall		/* event_channel_op */
+	.long	do_ni_hypercall		/* physdev_op */
+	.long	do_ni_hypercall		/* hvm_op */
+	.long	do_ni_hypercall		/* 35 : sysctl */
+	.long	do_ni_hypercall		/* domctl */
+	.long	do_ni_hypercall		/* kexec_op */
+	.long	do_ni_hypercall		/* tmem_op */
+	.long	do_ni_hypercall		/* xc_reserved_op */
+	.long	do_ni_hypercall		/* 40 : undefined */
+	.long	do_ni_hypercall		/* undefined */
+	.long	do_ni_hypercall		/* undefined */
+	.long	do_ni_hypercall		/* undefined */
+	.long	do_ni_hypercall		/* undefined */
+	.long	do_ni_hypercall		/* 45 : undefined */
+	.long	do_ni_hypercall		/* undefined */
+	.long	do_ni_hypercall		/* undefined */
+	.long	do_ni_hypercall		/* undefined */
+	.long	do_ni_hypercall		/* undefined */
+	.long	do_ni_hypercall		/* 50 : undefined */
+	.long	do_ni_hypercall		/* undefined */
+	.long	do_ni_hypercall		/* undefined */
+
+	.section .data
+ENTRY(xen_translation_table)
+	.long	start - 0x4000
+
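For context on how a guest lands in hypercall_table: invoke_hypercall above re-reads the trapping SWI instruction (ldr r12, [lr, #-4]) and clears the top eight bits, so the hypercall number is carried in the 24-bit SWI immediate and the arguments arrive in r0-r3, with r0 carrying the return value back to the guest. A guest-side stub would therefore look roughly like the sketch below; the stub name and the conservative clobber list are illustrative assumptions, not code from this series:

/* Hypothetical guest stub matching the decode in invoke_hypercall.
 * 17 is the do_xen_version slot in hypercall_table above. */
static inline int HYPERVISOR_xen_version(int cmd, void *arg)
{
	register int   _r0 asm("r0") = cmd;
	register void *_r1 asm("r1") = arg;

	asm volatile ( "swi #17"		/* hypercall number in the SWI immediate */
	               : "+r" (_r0)
	               : "r" (_r1)
	               : "r2", "r3", "ip", "lr", "memory" );
	return _r0;				/* r0 holds the hypercall's return value */
}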
diff -r 28a6038da99f xen/arch/arm/xen/hypercalls.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/hypercalls.S	Fri Feb 03 17:47:16 2012 +0900
@@ -0,0 +1,67 @@
+/*
+ * hypercalls.S
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *	Sang-bum Suh <sbuk.suh@xxxxxxxxxxx>
+ *	JaeMin Ryu <jm77.ryu@xxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public version 2 of License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <xen/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm/cpu-domain.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm-macros.h>
+
+#include <public/arch-arm.h>
+
+
+ENTRY(do_set_domain)
+	mov	pc, lr
+
+
+ENTRY(do_restore_trap_frame)
+	cci	r8
+	ldr	r4, [r8, #OFFSET_VCPU]
+	ldr	r6, [sp, #CTXT_USP]
+	ldr	r11, [r4, #OFFSET_VCPU_INFO]
+
+	ldr	r3, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSPSR)]
+	ldr	r2, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TLR)]
+	ldr	r1, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TSP)]
+
+	ldr	r7, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	tst	r3, #PSR_I_BIT
+	orrne	r7, #VPSR_I_BIT
+	biceq	r7, #VPSR_I_BIT
+
+	bic	r5, r3, #(PSR_MODE_MASK | PSR_I_BIT)
+	orr	r5, r5, #PSR_MODE_USR
+	and	r3, r3, #PSR_MODE_MASK
+
+	@ Construct latest guest context
+	str	r1, [sp, #CTXT_USP]
+	str	r2, [sp, #CTXT_PC]
+	str	r5, [sp, #CTXT_SPSR]
+	str	r3, [r8, #4]
+	str	r6, [r8, #8]
+
+	@ Update VPSR
+	str	r7, [r11, #(OFFSET_ARCH_VCPU_INFO + OFFSET_TCPSR)]
+
+	mov	pc, lr
diff -r 28a6038da99f xen/arch/arm/xen/physdev.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/physdev.c	Fri Feb 03 17:47:16 2012 +0900
@@ -0,0 +1,41 @@
+/*
+ * physdev.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *	Sang-bum Suh <sbuk.suh@xxxxxxxxxxx>
+ *	JaeMin Ryu <jm77.ryu@xxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public version 2 of License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/types.h>
+#include <xen/init.h>
+#include <xen/errno.h>
+#include <xen/spinlock.h>
+#include <xen/bitmap.h>
+#include <xen/sched.h>
+#include <xen/event.h>
+#include <xen/irq.h>
+#include <xen/guest_access.h>
+#include <public/arch-arm.h>
+#include <public/physdev.h>
+
+int do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
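Most of the hypercall_table slots above fall back to do_ni_hypercall, which this patch does not define; in Xen that fallback conventionally just reports the call as unimplemented. A minimal sketch, assuming the usual convention (the real definition lives elsewhere in the tree):

#include <xen/errno.h>

/* Fallback for hypercall_table slots the ARM port does not implement. */
long do_ni_hypercall(void)
{
	return -ENOSYS;
}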