The __regs structure represents the hardware context saved and restored
during trap handling. In assembly code, its fields are accessed via the
corresponding offset macro definitions.
Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
---
include/uk/arch/x86_64/lcpu.h | 28 ------------
plat/common/include/x86/regs.h | 94 ++++++++++++++++++++++++++++++++++++++++
plat/xen/include/xen-x86/traps.h | 2 +-
plat/xen/x86/entry64.S | 94 ++++++++++++++++++----------------------
4 files changed, 138 insertions(+), 80 deletions(-)
create mode 100644 plat/common/include/x86/regs.h
diff --git a/include/uk/arch/x86_64/lcpu.h b/include/uk/arch/x86_64/lcpu.h
index cd667e5..640ff68 100644
--- a/include/uk/arch/x86_64/lcpu.h
+++ b/include/uk/arch/x86_64/lcpu.h
@@ -32,34 +32,6 @@
#error Do not include this header directly
#endif
-struct __regs {
- unsigned long r15;
- unsigned long r14;
- unsigned long r13;
- unsigned long r12;
- unsigned long rbp;
- unsigned long rbx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
- unsigned long r11;
- unsigned long r10;
- unsigned long r9;
- unsigned long r8;
- unsigned long rax;
- unsigned long rcx;
- unsigned long rdx;
- unsigned long rsi;
- unsigned long rdi;
- unsigned long orig_rax;
-/* end of arguments */
-/* cpu exception frame or undefined */
- unsigned long rip;
- unsigned long cs;
- unsigned long eflags;
- unsigned long rsp;
- unsigned long ss;
-/* top of stack page */
-};
-
#ifndef mb
#define mb() __asm__ __volatile__ ("mfence" : : : "memory")
#endif
diff --git a/plat/common/include/x86/regs.h b/plat/common/include/x86/regs.h
new file mode 100644
index 0000000..a6a847e
--- /dev/null
+++ b/plat/common/include/x86/regs.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2005, Grzegorz Milos, Intel Research Cambridge
+ * Copyright (c) 2018, NEC Europe Ltd., NEC Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef __UKARCH_REGS_H__
+#define __UKARCH_REGS_H__
+
+#ifndef __ASSEMBLY__
+struct __regs {
+ unsigned long pad; /* for 16-byte alignment */
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long rbp;
+ unsigned long rbx;
+/* arguments: non-interrupt/non-tracing syscalls only save up to here */
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long rax;
+ unsigned long rcx;
+ unsigned long rdx;
+ unsigned long rsi;
+ unsigned long rdi;
+ unsigned long orig_rax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+ unsigned long rip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long rsp;
+ unsigned long ss;
+/* top of stack page */
+};
+#endif
+
+#define OFFSETOF_REGS_PAD 0
+#define OFFSETOF_REGS_R15 8
+#define OFFSETOF_REGS_R14 16
+#define OFFSETOF_REGS_R13 24
+#define OFFSETOF_REGS_R12 32
+#define OFFSETOF_REGS_RBP 40
+#define OFFSETOF_REGS_RBX 48
+#define OFFSETOF_REGS_R11 56
+#define OFFSETOF_REGS_R10 64
+#define OFFSETOF_REGS_R9 72
+#define OFFSETOF_REGS_R8 80
+#define OFFSETOF_REGS_RAX 88
+#define OFFSETOF_REGS_RCX 96
+#define OFFSETOF_REGS_RDX 104
+#define OFFSETOF_REGS_RSI 112
+#define OFFSETOF_REGS_RDI 120
+#define OFFSETOF_REGS_ORIG_RAX 128
+#define OFFSETOF_REGS_RIP 136
+#define OFFSETOF_REGS_CS 144
+#define OFFSETOF_REGS_EFLAGS 152
+#define OFFSETOF_REGS_RSP 160
+#define OFFSETOF_REGS_SS 168
+
+#define REGS_PAD_SIZE OFFSETOF_REGS_R15
+#define SIZEOF_REGS 176
+
+#if SIZEOF_REGS & 0xf
+#error "__regs structure size must be a multiple of 16."
+#endif
+
+/* This should be better defined in the thread header */
+#define OFFSETOF_UKTHREAD_REGS 16
+
+#endif /* __UKARCH_REGS_H__ */
diff --git a/plat/xen/include/xen-x86/traps.h b/plat/xen/include/xen-x86/traps.h
index c5cfb85..498af76 100644
--- a/plat/xen/include/xen-x86/traps.h
+++ b/plat/xen/include/xen-x86/traps.h
@@ -35,7 +35,7 @@
#ifndef _TRAPS_H_
#define _TRAPS_H_
-#include <uk/arch/lcpu.h>
+#include <x86/regs.h>
#define pt_regs __regs
diff --git a/plat/xen/x86/entry64.S b/plat/xen/x86/entry64.S
index 314bb70..8109ccb 100644
--- a/plat/xen/x86/entry64.S
+++ b/plat/xen/x86/entry64.S
@@ -25,6 +25,7 @@
#include <uk/arch/types.h>
#include <uk/arch/limits.h>
+#include <x86/regs.h>
#include <uk/config.h>
#include <xen/xen.h>
#include <xen/elfnote.h>
@@ -91,15 +92,6 @@ hypercall_page:
NMI_MASK = 0x80000000
KERNEL_CS_MASK = 0xfc
-#define RAX 80
-#define RDI 112
-#define ORIG_RAX 120 /* + error_code */
-#define RIP 128
-#define CS 136
-#define RFLAGS 144
-#define RSP 152
-
-
/* Macros */
.macro SAVE_PARAVIRT
#ifdef CONFIG_PARAVIRT
@@ -124,44 +116,44 @@ KERNEL_CS_MASK = 0xfc
.endm
.macro RESTORE_ALL
- movq (%rsp),%r15
- movq 1*8(%rsp),%r14
- movq 2*8(%rsp),%r13
- movq 3*8(%rsp),%r12
- movq 4*8(%rsp),%rbp
- movq 5*8(%rsp),%rbx
- movq 6*8(%rsp),%r11
- movq 7*8(%rsp),%r10
- movq 8*8(%rsp),%r9
- movq 9*8(%rsp),%r8
- movq 10*8(%rsp),%rax
- movq 11*8(%rsp),%rcx
- movq 12*8(%rsp),%rdx
- movq 13*8(%rsp),%rsi
- movq 14*8(%rsp),%rdi
- addq $15*8+8,%rsp
+ movq OFFSETOF_REGS_R15(%rsp), %r15
+ movq OFFSETOF_REGS_R14(%rsp), %r14
+ movq OFFSETOF_REGS_R13(%rsp), %r13
+ movq OFFSETOF_REGS_R12(%rsp), %r12
+ movq OFFSETOF_REGS_RBP(%rsp), %rbp
+ movq OFFSETOF_REGS_RBX(%rsp), %rbx
+ movq OFFSETOF_REGS_R11(%rsp), %r11
+ movq OFFSETOF_REGS_R10(%rsp), %r10
+ movq OFFSETOF_REGS_R9(%rsp), %r9
+ movq OFFSETOF_REGS_R8(%rsp), %r8
+ movq OFFSETOF_REGS_RAX(%rsp), %rax
+ movq OFFSETOF_REGS_RCX(%rsp), %rcx
+ movq OFFSETOF_REGS_RDX(%rsp), %rdx
+ movq OFFSETOF_REGS_RSI(%rsp), %rsi
+ movq OFFSETOF_REGS_RDI(%rsp), %rdi
+ addq $OFFSETOF_REGS_RIP,%rsp
.endm
.macro SAVE_ALL
/* rdi slot contains rax, oldrax contains error code */
cld
- subq $14*8,%rsp
- movq %rsi,13*8(%rsp)
- movq 14*8(%rsp),%rsi /* load rax from rdi slot */
- movq %rdx,12*8(%rsp)
- movq %rcx,11*8(%rsp)
- movq %rsi,10*8(%rsp) /* store rax */
- movq %r8, 9*8(%rsp)
- movq %r9, 8*8(%rsp)
- movq %r10,7*8(%rsp)
- movq %r11,6*8(%rsp)
- movq %rbx,5*8(%rsp)
- movq %rbp,4*8(%rsp)
- movq %r12,3*8(%rsp)
- movq %r13,2*8(%rsp)
- movq %r14,1*8(%rsp)
- movq %r15,(%rsp)
- movq %rdi, RDI(%rsp) /* put rdi into the slot */
+ subq $OFFSETOF_REGS_RDI, %rsp
+ movq %rsi, OFFSETOF_REGS_RSI(%rsp)
+ movq OFFSETOF_REGS_RDI(%rsp), %rsi /* load rax from rdi slot */
+ movq %rdx, OFFSETOF_REGS_RDX(%rsp)
+ movq %rcx, OFFSETOF_REGS_RCX(%rsp)
+ movq %rsi, OFFSETOF_REGS_RAX(%rsp) /* store rax */
+ movq %r8, OFFSETOF_REGS_R8(%rsp)
+ movq %r9, OFFSETOF_REGS_R9(%rsp)
+ movq %r10, OFFSETOF_REGS_R10(%rsp)
+ movq %r11, OFFSETOF_REGS_R11(%rsp)
+ movq %rbx, OFFSETOF_REGS_RBX(%rsp)
+ movq %rbp, OFFSETOF_REGS_RBP(%rsp)
+ movq %r12, OFFSETOF_REGS_R12(%rsp)
+ movq %r13, OFFSETOF_REGS_R13(%rsp)
+ movq %r14, OFFSETOF_REGS_R14(%rsp)
+ movq %r15, OFFSETOF_REGS_R15(%rsp)
+ movq %rdi, OFFSETOF_REGS_RDI(%rsp) /* put rdi into the slot */
.endm
.macro HYPERVISOR_IRET
@@ -192,8 +184,8 @@ error_entry:
SAVE_ALL
movq %rsp,%rdi
- movq ORIG_RAX(%rsp),%rsi # get error code
- movq $-1,ORIG_RAX(%rsp)
+ movq OFFSETOF_REGS_ORIG_RAX(%rsp),%rsi # get error code
+ movq $-1,OFFSETOF_REGS_ORIG_RAX(%rsp)
call *%rax
jmp error_exit
@@ -209,7 +201,7 @@ hypervisor_callback2:
movq %rdi, %rsp
/* check against event re-entrant */
- movq RIP(%rsp),%rax
+ movq OFFSETOF_REGS_RIP(%rsp),%rax
cmpq $scrit,%rax
jb 11f
cmpq $ecrit,%rax
@@ -224,7 +216,7 @@ hypervisor_callback2:
decl %gs:0
error_exit:
- movl RFLAGS(%rsp), %eax
+ movl OFFSETOF_REGS_EFLAGS(%rsp), %eax
shr $9, %eax # EAX[0] == IRET_RFLAGS.IF
XEN_GET_VCPU_INFO(%rsi)
andb evtchn_upcall_mask(%rsi),%al
@@ -270,11 +262,11 @@ hypervisor_prologue:
critical_region_fixup:
# Set up source and destination region pointers
- leaq RIP(%rsp),%rsi # esi points at end of src region
+ leaq OFFSETOF_REGS_RIP(%rsp),%rsi # esi points at end of src region
# Acquire interrupted rsp which was saved-on-stack. This points to
# the end of dst region. Note that it is not necessarily current rsp
# plus 0xb0, because the second interrupt might align the stack frame.
- movq RSP(%rsp),%rdi # edi points at end of dst region
+ movq OFFSETOF_REGS_RSP(%rsp),%rdi # edi points at end of dst region
cmpq $restore_end,%rax
jae 13f
@@ -282,11 +274,11 @@ critical_region_fixup:
# If interrupted rip is before restore_end
# then rax hasn't been restored yet
movq (%rdi),%rax
- movq %rax, RAX(%rsp) # save rax
+ movq %rax, OFFSETOF_REGS_RAX(%rsp) # save rax
addq $RSP_OFFSET,%rdi
# Set up the copy
-13: movq $RIP,%rcx
+13: movq $OFFSETOF_REGS_RIP,%rcx
shr $3,%rcx # convert bytes into count of 64-bit entities
15: subq $8,%rsi # pre-decrementing copy loop
subq $8,%rdi
@@ -294,7 +286,7 @@ critical_region_fixup:
movq %rax,(%rdi)
loop 15b
16: movq %rdi,%rsp # final rdi is top of merged stack
- andb $KERNEL_CS_MASK,CS(%rsp) # CS might have changed
+ andb $KERNEL_CS_MASK,OFFSETOF_REGS_CS(%rsp) # CS might have changed
jmp 11b
#else