[PATCH 5/8] x86/domctl: Stop using XLAT_cpu_user_regs()
In order to support FRED, we're going to have to remove the {ds..gs} fields
from struct cpu_user_regs, meaning that it will have to become a different
type from the structure embedded in vcpu_guest_context_u.
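(Illustrative sketch only, not the real Xen definitions: heavily abbreviated,
hypothetical layouts showing why a plain memcpy() between the guest-ABI
register block and a FRED-era internal block stops being valid once the
selector fields diverge.)

#include <stdint.h>

/* Illustration only -- abbreviated, hypothetical layouts. */
struct guest_abi_regs {        /* shape of the public/guest-context block */
    uint64_t rip, rflags, rsp;
    uint16_t cs, ss;
    uint16_t es, ds, fs, gs;   /* {ds..gs}: stay in the guest ABI         */
};

struct xen_internal_regs {     /* hypothetical FRED-era internal block    */
    uint64_t rip, rflags, rsp;
    uint16_t cs, ss;           /* {ds..gs} gone: layouts no longer match  */
};

/* A struct-to-struct memcpy() is no longer meaningful; copy per field:   */
static void get_regs(struct guest_abi_regs *dst,
                     const struct xen_internal_regs *src)
{
    dst->rip    = src->rip;
    dst->rflags = src->rflags;
    dst->rsp    = src->rsp;
    dst->cs     = src->cs;
    dst->ss     = src->ss;
    /* The selector fields would be filled from wherever FRED keeps them. */
}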
In both arch_{get,set}_info_guest(), expand the memcpy()/XLAT_cpu_user_regs()
to copy the fields individually. This will allow us to eventually make them
different types.
No practical change. The compat cases are identical, while the non-compat
cases no longer copy _pad fields.
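(Also illustrative, on the _pad remark above, using hypothetical stand-in
structures: memcpy() reproduces the explicitly-private padding bytes of the
source, whereas per-field assignment leaves the destination's padding alone.)

#include <stdint.h>
#include <string.h>

/* Illustration only -- stand-in structure, not the real cpu_user_regs. */
struct regs {
    uint16_t cs, _pad0[1];     /* _pad0 is documented as private/unused */
    uint64_t rip;
};

static void copy_whole(struct regs *dst, const struct regs *src)
{
    memcpy(dst, src, sizeof(*dst));   /* copies _pad0 as well           */
}

static void copy_fields(struct regs *dst, const struct regs *src)
{
    dst->cs  = src->cs;               /* _pad0 left untouched           */
    dst->rip = src->rip;
}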
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Should we really be copying error_code/entry_vector? They're already listed
as explicitly private fields, and I don't think anything good can come of
providing/consuming them.
---
xen/arch/x86/domain.c | 42 ++++++++++++++++++++++++++++++++++++++++--
xen/arch/x86/domctl.c | 42 ++++++++++++++++++++++++++++++++++++++++--
xen/include/xlat.lst | 2 --
3 files changed, 80 insertions(+), 6 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a42fa5480593..bc0816c71495 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1196,7 +1196,26 @@ int arch_set_info_guest(
     if ( !compat )
     {
-        memcpy(&v->arch.user_regs, &c.nat->user_regs, sizeof(c.nat->user_regs));
+        v->arch.user_regs.rbx = c.nat->user_regs.rbx;
+        v->arch.user_regs.rcx = c.nat->user_regs.rcx;
+        v->arch.user_regs.rdx = c.nat->user_regs.rdx;
+        v->arch.user_regs.rsi = c.nat->user_regs.rsi;
+        v->arch.user_regs.rdi = c.nat->user_regs.rdi;
+        v->arch.user_regs.rbp = c.nat->user_regs.rbp;
+        v->arch.user_regs.rax = c.nat->user_regs.rax;
+        v->arch.user_regs.error_code = c.nat->user_regs.error_code;
+        v->arch.user_regs.entry_vector = c.nat->user_regs.entry_vector;
+        v->arch.user_regs.rip = c.nat->user_regs.rip;
+        v->arch.user_regs.cs = c.nat->user_regs.cs;
+        v->arch.user_regs.saved_upcall_mask = c.nat->user_regs.saved_upcall_mask;
+        v->arch.user_regs.rflags = c.nat->user_regs.rflags;
+        v->arch.user_regs.rsp = c.nat->user_regs.rsp;
+        v->arch.user_regs.ss = c.nat->user_regs.ss;
+        v->arch.user_regs.es = c.nat->user_regs.es;
+        v->arch.user_regs.ds = c.nat->user_regs.ds;
+        v->arch.user_regs.fs = c.nat->user_regs.fs;
+        v->arch.user_regs.gs = c.nat->user_regs.gs;
+
         if ( is_pv_domain(d) )
             memcpy(v->arch.pv.trap_ctxt, c.nat->trap_ctxt,
                    sizeof(c.nat->trap_ctxt));
@@ -1204,7 +1223,26 @@ int arch_set_info_guest(
 #ifdef CONFIG_COMPAT
     else
     {
-        XLAT_cpu_user_regs(&v->arch.user_regs, &c.cmp->user_regs);
+        v->arch.user_regs.ebx = c.cmp->user_regs.ebx;
+        v->arch.user_regs.ecx = c.cmp->user_regs.ecx;
+        v->arch.user_regs.edx = c.cmp->user_regs.edx;
+        v->arch.user_regs.esi = c.cmp->user_regs.esi;
+        v->arch.user_regs.edi = c.cmp->user_regs.edi;
+        v->arch.user_regs.ebp = c.cmp->user_regs.ebp;
+        v->arch.user_regs.eax = c.cmp->user_regs.eax;
+        v->arch.user_regs.error_code = c.cmp->user_regs.error_code;
+        v->arch.user_regs.entry_vector = c.cmp->user_regs.entry_vector;
+        v->arch.user_regs.eip = c.cmp->user_regs.eip;
+        v->arch.user_regs.cs = c.cmp->user_regs.cs;
+        v->arch.user_regs.saved_upcall_mask = c.cmp->user_regs.saved_upcall_mask;
+        v->arch.user_regs.eflags = c.cmp->user_regs.eflags;
+        v->arch.user_regs.esp = c.cmp->user_regs.esp;
+        v->arch.user_regs.ss = c.cmp->user_regs.ss;
+        v->arch.user_regs.es = c.cmp->user_regs.es;
+        v->arch.user_regs.ds = c.cmp->user_regs.ds;
+        v->arch.user_regs.fs = c.cmp->user_regs.fs;
+        v->arch.user_regs.gs = c.cmp->user_regs.gs;
+
         if ( is_pv_domain(d) )
         {
             for ( i = 0; i < ARRAY_SIZE(c.cmp->trap_ctxt); ++i )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 3044f706de1c..7ab9e9176b58 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1399,7 +1399,26 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
         c(flags |= VGCF_online);
     if ( !compat )
     {
-        memcpy(&c.nat->user_regs, &v->arch.user_regs, sizeof(c.nat->user_regs));
+        c.nat->user_regs.rbx = v->arch.user_regs.rbx;
+        c.nat->user_regs.rcx = v->arch.user_regs.rcx;
+        c.nat->user_regs.rdx = v->arch.user_regs.rdx;
+        c.nat->user_regs.rsi = v->arch.user_regs.rsi;
+        c.nat->user_regs.rdi = v->arch.user_regs.rdi;
+        c.nat->user_regs.rbp = v->arch.user_regs.rbp;
+        c.nat->user_regs.rax = v->arch.user_regs.rax;
+        c.nat->user_regs.error_code = v->arch.user_regs.error_code;
+        c.nat->user_regs.entry_vector = v->arch.user_regs.entry_vector;
+        c.nat->user_regs.rip = v->arch.user_regs.rip;
+        c.nat->user_regs.cs = v->arch.user_regs.cs;
+        c.nat->user_regs.saved_upcall_mask = v->arch.user_regs.saved_upcall_mask;
+        c.nat->user_regs.rflags = v->arch.user_regs.rflags;
+        c.nat->user_regs.rsp = v->arch.user_regs.rsp;
+        c.nat->user_regs.ss = v->arch.user_regs.ss;
+        c.nat->user_regs.es = v->arch.user_regs.es;
+        c.nat->user_regs.ds = v->arch.user_regs.ds;
+        c.nat->user_regs.fs = v->arch.user_regs.fs;
+        c.nat->user_regs.gs = v->arch.user_regs.gs;
+
         if ( is_pv_domain(d) )
             memcpy(c.nat->trap_ctxt, v->arch.pv.trap_ctxt,
                    sizeof(c.nat->trap_ctxt));
@@ -1407,7 +1426,26 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
 #ifdef CONFIG_COMPAT
     else
     {
-        XLAT_cpu_user_regs(&c.cmp->user_regs, &v->arch.user_regs);
+        c.cmp->user_regs.ebx = v->arch.user_regs.ebx;
+        c.cmp->user_regs.ecx = v->arch.user_regs.ecx;
+        c.cmp->user_regs.edx = v->arch.user_regs.edx;
+        c.cmp->user_regs.esi = v->arch.user_regs.esi;
+        c.cmp->user_regs.edi = v->arch.user_regs.edi;
+        c.cmp->user_regs.ebp = v->arch.user_regs.ebp;
+        c.cmp->user_regs.eax = v->arch.user_regs.eax;
+        c.cmp->user_regs.error_code = v->arch.user_regs.error_code;
+        c.cmp->user_regs.entry_vector = v->arch.user_regs.entry_vector;
+        c.cmp->user_regs.eip = v->arch.user_regs.eip;
+        c.cmp->user_regs.cs = v->arch.user_regs.cs;
+        c.cmp->user_regs.saved_upcall_mask = v->arch.user_regs.saved_upcall_mask;
+        c.cmp->user_regs.eflags = v->arch.user_regs.eflags;
+        c.cmp->user_regs.esp = v->arch.user_regs.esp;
+        c.cmp->user_regs.ss = v->arch.user_regs.ss;
+        c.cmp->user_regs.es = v->arch.user_regs.es;
+        c.cmp->user_regs.ds = v->arch.user_regs.ds;
+        c.cmp->user_regs.fs = v->arch.user_regs.fs;
+        c.cmp->user_regs.gs = v->arch.user_regs.gs;
+
         if ( is_pv_domain(d) )
         {
             for ( i = 0; i < ARRAY_SIZE(c.cmp->trap_ctxt); ++i )
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 3c7b6c6830a9..6d6c6cfab251 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -34,8 +34,6 @@
? pmu_intel_ctxt arch-x86/pmu.h
? pmu_regs arch-x86/pmu.h
-! cpu_user_regs arch-x86/xen-@arch@.h
-
? cpu_offline_action arch-x86/xen-mca.h
? mc arch-x86/xen-mca.h
! mc_fetch arch-x86/xen-mca.h
--
2.39.5