|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 11/17] xen: arm: refactor 64-bit return from trap path
Refactor exit path to use a single "exit" macro similar to the entry path.
We can also remove the logic at "return_to_new_vcpu" which detects returns to
hypervisor mode -- seemingly trying to handle hypervisor threads which aren't
a thing which we have. The idle VCPUs do not take this path. This simplifies
the return_to_new_vcpu code, we also split it into 32- and 64-bit VCPU paths.
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
v4: split out from "xen: arm: handle traps from 64-bit guests"
---
xen/arch/arm/arm32/entry.S | 2 +-
xen/arch/arm/arm64/entry.S | 40 +++++++++++++++++++++++-----------------
xen/arch/arm/domain.c | 6 +++++-
3 files changed, 29 insertions(+), 19 deletions(-)
diff --git a/xen/arch/arm/arm32/entry.S b/xen/arch/arm/arm32/entry.S
index 6cdf0aa..81d5990 100644
--- a/xen/arch/arm/arm32/entry.S
+++ b/xen/arch/arm/arm32/entry.S
@@ -87,7 +87,7 @@ DEFINE_TRAP_ENTRY_NOIRQ(fiq)
return_from_trap:
mov sp, r11
-ENTRY(return_to_new_vcpu)
+ENTRY(return_to_new_vcpu32)
ldr r11, [sp, #UREGS_cpsr]
and r11, #PSR_MODE_MASK
cmp r11, #PSR_MODE_HYP
diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index c0d2bd8..390a11d 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -57,7 +57,7 @@ lr .req x30 // link register
.endm
/*
- * Save state on entry to hypervisor
+ * Save state on entry to hypervisor, restore on exit
*/
.macro entry, hyp, compat
sub sp, sp, #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */
@@ -96,6 +96,18 @@ lr .req x30 // link register
.endm
+ .macro exit, hyp, compat
+
+ .if \hyp == 0 /* Guest mode */
+
+ bl leave_hypervisor_tail /* Disables interrupts on return */
+
+ .endif
+
+ b return_from_trap
+
+ .endm
+
/*
* Bad Abort numbers
*-----------------
@@ -133,13 +145,13 @@ hyp_sync:
msr daifclr, #2
mov x0, sp
bl do_trap_hypervisor
- b return_to_hypervisor
+ exit hyp=1
hyp_irq:
entry hyp=1
mov x0, sp
bl do_trap_irq
- b return_to_hypervisor
+ exit hyp=1
guest_sync:
entry hyp=0, compat=0
@@ -162,13 +174,13 @@ guest_sync_compat:
msr daifclr, #2
mov x0, sp
bl do_trap_hypervisor
- b return_to_guest
+ exit hyp=0, compat=1
guest_irq_compat:
entry hyp=0, compat=1
mov x0, sp
bl do_trap_irq
- b return_to_guest
+ exit hyp=0, compat=1
guest_fiq_invalid_compat:
entry hyp=0, compat=1
@@ -178,18 +190,12 @@ guest_error_invalid_compat:
entry hyp=0, compat=1
invalid BAD_ERROR
-ENTRY(return_to_new_vcpu)
- ldr x21, [sp, #UREGS_CPSR]
- and x21, x21, #PSR_MODE_MASK
- /* Returning to EL2? */
- cmp x21, #PSR_MODE_EL2t
- ccmp x21, #PSR_MODE_EL2h, #0x4, ne
- b.eq return_to_hypervisor /* Yes */
- /* Fall thru */
-return_to_guest:
- bl leave_hypervisor_tail /* Disables interrupts on return */
- /* Fall thru */
-return_to_hypervisor:
+ENTRY(return_to_new_vcpu32)
+ exit hyp=0, compat=1
+ENTRY(return_to_new_vcpu64)
+ exit hyp=0, compat=0
+
+return_from_trap:
msr daifset, #2 /* Mask interrupts */
ldp x21, x22, [sp, #UREGS_PC] // load ELR, SPSR
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index b4d99f1..4e9cece 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -250,9 +250,13 @@ static void continue_new_vcpu(struct vcpu *prev)
if ( is_idle_vcpu(current) )
reset_stack_and_jump(idle_loop);
+ else if ( is_pv32_domain(current->domain) )
+ /* check_wakeup_from_wait(); */
+ reset_stack_and_jump(return_to_new_vcpu32);
else
/* check_wakeup_from_wait(); */
- reset_stack_and_jump(return_to_new_vcpu);
+ reset_stack_and_jump(return_to_new_vcpu64);
+
}
void context_switch(struct vcpu *prev, struct vcpu *next)
--
1.7.2.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |