[Xen-devel] [PATCH 4/7] x86/hvm: Rename v->arch.hvm_vcpu to v->arch.hvm
The trailing _vcpu suffix is redundant, but adds to code volume. Drop it.
Reflow lines as appropriate, and switch to using the new XFREE/etc wrappers
where applicable.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
CC: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
CC: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
CC: Brian Woods <brian.woods@xxxxxxx>
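Aside (illustration only, not part of the diff): the "XFREE/etc wrappers"
mentioned in the commit message are free-and-NULL helpers. The standalone
sketch below shows the idiom only; the XFREE() definition here is an
assumption for demonstration and wraps the standard free() rather than
Xen's xfree(), so treat it as a sketch, not the in-tree implementation.

    /* Minimal sketch of a free-and-NULL wrapper in the XFREE() style. */
    #include <stdlib.h>

    #define XFREE(p) do { free(p); (p) = NULL; } while ( 0 )

    struct demo_state {
        int *buffer;
    };

    static void demo_teardown(struct demo_state *s)
    {
        /* Open coded: free(s->buffer); s->buffer = NULL;  -- two statements. */
        /* Wrapper: one statement, and the pointer cannot dangle afterwards.  */
        XFREE(s->buffer);
    }

    int main(void)
    {
        struct demo_state s = { .buffer = malloc(16 * sizeof(int)) };

        demo_teardown(&s);
        demo_teardown(&s); /* Safe to repeat: free(NULL) is a no-op. */

        return 0;
    }

The attraction of the wrapper is that teardown paths cannot leave a stale
pointer behind, which is presumably why the commit message opts to switch
to it where applicable while the lines are being touched anyway.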
---
xen/arch/x86/cpu/vpmu.c | 2 +-
xen/arch/x86/cpuid.c | 4 +-
xen/arch/x86/domain.c | 4 +-
xen/arch/x86/domctl.c | 8 +-
xen/arch/x86/hvm/asid.c | 2 +-
xen/arch/x86/hvm/dm.c | 12 +--
xen/arch/x86/hvm/domain.c | 38 ++++----
xen/arch/x86/hvm/emulate.c | 28 +++---
xen/arch/x86/hvm/hpet.c | 2 +-
xen/arch/x86/hvm/hvm.c | 162 ++++++++++++++++----------------
xen/arch/x86/hvm/io.c | 12 +--
xen/arch/x86/hvm/ioreq.c | 4 +-
xen/arch/x86/hvm/irq.c | 6 +-
xen/arch/x86/hvm/mtrr.c | 22 ++---
xen/arch/x86/hvm/pmtimer.c | 2 +-
xen/arch/x86/hvm/svm/asid.c | 2 +-
xen/arch/x86/hvm/svm/nestedsvm.c | 44 ++++-----
xen/arch/x86/hvm/svm/svm.c | 67 +++++++------
xen/arch/x86/hvm/svm/vmcb.c | 6 +-
xen/arch/x86/hvm/viridian.c | 64 ++++++-------
xen/arch/x86/hvm/vmsi.c | 30 +++---
xen/arch/x86/hvm/vmx/intr.c | 2 +-
xen/arch/x86/hvm/vmx/realmode.c | 10 +-
xen/arch/x86/hvm/vmx/vmcs.c | 8 +-
xen/arch/x86/hvm/vmx/vmx.c | 130 ++++++++++++-------------
xen/arch/x86/hvm/vmx/vvmx.c | 32 +++----
xen/arch/x86/hvm/vpt.c | 74 +++++++--------
xen/arch/x86/mm/hap/guest_walk.c | 2 +-
xen/arch/x86/mm/hap/hap.c | 4 +-
xen/arch/x86/mm/shadow/multi.c | 12 +--
xen/arch/x86/time.c | 6 +-
xen/arch/x86/x86_64/asm-offsets.c | 8 +-
xen/arch/x86/x86_64/traps.c | 8 +-
xen/include/asm-x86/domain.h | 6 +-
xen/include/asm-x86/guest_pt.h | 2 +-
xen/include/asm-x86/hvm/hvm.h | 20 ++--
xen/include/asm-x86/hvm/nestedhvm.h | 2 +-
xen/include/asm-x86/hvm/svm/nestedsvm.h | 2 +-
xen/include/asm-x86/hvm/vcpu.h | 4 +-
xen/include/asm-x86/hvm/vlapic.h | 6 +-
xen/include/asm-x86/hvm/vmx/vmx.h | 2 +-
41 files changed, 428 insertions(+), 433 deletions(-)
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index fa6762f..8a4f753 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -304,7 +304,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
hvm_get_segment_register(sampled, x86_seg_ss, &seg);
r->ss = seg.sel;
r->cpl = seg.dpl;
- if ( !(sampled->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
+ if ( !(sampled->arch.hvm.guest_cr[0] & X86_CR0_PE) )
*flags |= PMU_SAMPLE_REAL;
}
}
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 24366ea..59d3298 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -829,7 +829,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
if ( is_hvm_domain(d) )
{
/* OSXSAVE clear in policy. Fast-forward CR4 back in. */
- if ( v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE )
+ if ( v->arch.hvm.guest_cr[4] & X86_CR4_OSXSAVE )
res->c |= cpufeat_mask(X86_FEATURE_OSXSAVE);
}
else /* PV domain */
@@ -960,7 +960,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
/* OSPKE clear in policy. Fast-forward CR4 back in. */
if ( (is_pv_domain(d)
? v->arch.pv.ctrlreg[4]
- : v->arch.hvm_vcpu.guest_cr[4]) & X86_CR4_PKE )
+ : v->arch.hvm.guest_cr[4]) & X86_CR4_PKE )
res->c |= cpufeat_mask(X86_FEATURE_OSPKE);
break;
}
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 3dcd7f9..ccdfec2 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1619,7 +1619,7 @@ static void __context_switch(void)
BUG();
if ( cpu_has_xsaves && is_hvm_vcpu(n) )
- set_msr_xss(n->arch.hvm_vcpu.msr_xss);
+ set_msr_xss(n->arch.hvm.msr_xss);
}
vcpu_restore_fpu_nonlazy(n, false);
nd->arch.ctxt_switch->to(n);
@@ -1692,7 +1692,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
np2m_schedule(NP2M_SCHEDLE_OUT);
}
- if ( is_hvm_domain(prevd) && !list_empty(&prev->arch.hvm_vcpu.tm_list) )
+ if ( is_hvm_domain(prevd) && !list_empty(&prev->arch.hvm.tm_list) )
pt_save_timer(prev);
local_irq_disable();
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index f306614..797841e 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1585,10 +1585,10 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
struct segment_register sreg;
- c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
- c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
- c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
- c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
+ c.nat->ctrlreg[0] = v->arch.hvm.guest_cr[0];
+ c.nat->ctrlreg[2] = v->arch.hvm.guest_cr[2];
+ c.nat->ctrlreg[3] = v->arch.hvm.guest_cr[3];
+ c.nat->ctrlreg[4] = v->arch.hvm.guest_cr[4];
hvm_get_segment_register(v, x86_seg_cs, &sreg);
c.nat->user_regs.cs = sreg.sel;
hvm_get_segment_register(v, x86_seg_ss, &sreg);
diff --git a/xen/arch/x86/hvm/asid.c b/xen/arch/x86/hvm/asid.c
index beca8ec..9d3c671 100644
--- a/xen/arch/x86/hvm/asid.c
+++ b/xen/arch/x86/hvm/asid.c
@@ -87,7 +87,7 @@ void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid)
void hvm_asid_flush_vcpu(struct vcpu *v)
{
- hvm_asid_flush_vcpu_asid(&v->arch.hvm_vcpu.n1asid);
+ hvm_asid_flush_vcpu_asid(&v->arch.hvm.n1asid);
hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
}
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 6755f3f..87d97d0 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -317,17 +317,17 @@ static int inject_event(struct domain *d,
if ( data->vcpuid >= d->max_vcpus || !(v = d->vcpu[data->vcpuid]) )
return -EINVAL;
- if ( cmpxchg(&v->arch.hvm_vcpu.inject_event.vector,
+ if ( cmpxchg(&v->arch.hvm.inject_event.vector,
HVM_EVENT_VECTOR_UNSET, HVM_EVENT_VECTOR_UPDATING) !=
HVM_EVENT_VECTOR_UNSET )
return -EBUSY;
- v->arch.hvm_vcpu.inject_event.type = data->type;
- v->arch.hvm_vcpu.inject_event.insn_len = data->insn_len;
- v->arch.hvm_vcpu.inject_event.error_code = data->error_code;
- v->arch.hvm_vcpu.inject_event.cr2 = data->cr2;
+ v->arch.hvm.inject_event.type = data->type;
+ v->arch.hvm.inject_event.insn_len = data->insn_len;
+ v->arch.hvm.inject_event.error_code = data->error_code;
+ v->arch.hvm.inject_event.cr2 = data->cr2;
smp_wmb();
- v->arch.hvm_vcpu.inject_event.vector = data->vector;
+ v->arch.hvm.inject_event.vector = data->vector;
return 0;
}
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 8a2c83e..5d5a746 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -204,10 +204,10 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
uregs->rip = regs->eip;
uregs->rflags = regs->eflags;
- v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
- v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
- v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
- v->arch.hvm_vcpu.guest_efer = regs->efer;
+ v->arch.hvm.guest_cr[0] = regs->cr0;
+ v->arch.hvm.guest_cr[3] = regs->cr3;
+ v->arch.hvm.guest_cr[4] = regs->cr4;
+ v->arch.hvm.guest_efer = regs->efer;
}
break;
@@ -255,10 +255,10 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
uregs->rip = regs->rip;
uregs->rflags = regs->rflags;
- v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
- v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
- v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
- v->arch.hvm_vcpu.guest_efer = regs->efer;
+ v->arch.hvm.guest_cr[0] = regs->cr0;
+ v->arch.hvm.guest_cr[3] = regs->cr3;
+ v->arch.hvm.guest_cr[4] = regs->cr4;
+ v->arch.hvm.guest_efer = regs->efer;
#define SEG(l, a) (struct segment_register){ 0, { a }, l, 0 }
cs = SEG(~0u, 0xa9b); /* 64bit code segment. */
@@ -270,21 +270,21 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
}
- if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
- v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
+ if ( v->arch.hvm.guest_efer & EFER_LME )
+ v->arch.hvm.guest_efer |= EFER_LMA;
- if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(d, false) )
+ if ( v->arch.hvm.guest_cr[4] & ~hvm_cr4_guest_valid_bits(d, false) )
{
gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
- v->arch.hvm_vcpu.guest_cr[4]);
+ v->arch.hvm.guest_cr[4]);
return -EINVAL;
}
- errstr = hvm_efer_valid(v, v->arch.hvm_vcpu.guest_efer, -1);
+ errstr = hvm_efer_valid(v, v->arch.hvm.guest_efer, -1);
if ( errstr )
{
gprintk(XENLOG_ERR, "Bad EFER value (%#016lx): %s\n",
- v->arch.hvm_vcpu.guest_efer, errstr);
+ v->arch.hvm.guest_efer, errstr);
return -EINVAL;
}
@@ -297,12 +297,12 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
struct page_info *page = get_page_from_gfn(v->domain,
- v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT,
+ v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
NULL, P2M_ALLOC);
if ( !page )
{
gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n",
- v->arch.hvm_vcpu.guest_cr[3]);
+ v->arch.hvm.guest_cr[3]);
return -EINVAL;
}
@@ -316,9 +316,9 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
hvm_set_segment_register(v, x86_seg_tr, &tr);
/* Sync AP's TSC with BSP's. */
- v->arch.hvm_vcpu.cache_tsc_offset =
- d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+ v->arch.hvm.cache_tsc_offset =
+ d->vcpu[0]->arch.hvm.cache_tsc_offset;
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset,
d->arch.hvm.sync_tsc);
paging_update_paging_modes(v);
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 20d1d5b..dbf8b81 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -123,7 +123,7 @@ static int hvmemul_do_io(
{
struct vcpu *curr = current;
struct domain *currd = curr->domain;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
ioreq_t p = {
.type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
.addr = addr,
@@ -437,7 +437,7 @@ static int hvmemul_do_io_addr(
ASSERT(rc != X86EMUL_UNIMPLEMENTED);
if ( rc == X86EMUL_OKAY )
- v->arch.hvm_vcpu.hvm_io.mmio_retry = (count < *reps);
+ v->arch.hvm.hvm_io.mmio_retry = (count < *reps);
*reps = count;
@@ -706,7 +706,7 @@ static int hvmemul_linear_to_phys(
*reps = min_t(unsigned long, *reps, 4096);
/* With no paging it's easy: linear == physical. */
- if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG) )
+ if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
{
*paddr = addr;
return X86EMUL_OKAY;
@@ -975,7 +975,7 @@ static int hvmemul_linear_mmio_access(
unsigned long gla, unsigned int size, uint8_t dir, void *buffer,
uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, bool_t known_gpfn)
{
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned long offset = gla & ~PAGE_MASK;
struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir);
unsigned int chunk, buffer_offset = 0;
@@ -1053,7 +1053,7 @@ static int __hvmemul_read(
pagefault_info_t pfinfo;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
if ( is_x86_system_segment(seg) )
@@ -1174,7 +1174,7 @@ static int hvmemul_write(
struct vcpu *curr = current;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
void *mapping;
@@ -1218,7 +1218,7 @@ static int hvmemul_rmw(
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
int rc;
void *mapping;
@@ -1375,7 +1375,7 @@ static int hvmemul_cmpxchg(
struct vcpu *curr = current;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
void *mapping = NULL;
@@ -1593,7 +1593,7 @@ static int hvmemul_rep_movs(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned long saddr, daddr, bytes;
paddr_t sgpa, dgpa;
uint32_t pfec = PFEC_page_present;
@@ -1748,7 +1748,7 @@ static int hvmemul_rep_stos(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned long addr, bytes;
paddr_t gpa;
p2m_type_t p2mt;
@@ -1931,7 +1931,7 @@ static int hvmemul_read_cr(
case 2:
case 3:
case 4:
- *val = current->arch.hvm_vcpu.guest_cr[reg];
+ *val = current->arch.hvm.guest_cr[reg];
HVMTRACE_LONG_2D(CR_READ, reg, TRC_PAR_LONG(*val));
return X86EMUL_OKAY;
default:
@@ -1956,7 +1956,7 @@ static int hvmemul_write_cr(
break;
case 2:
- current->arch.hvm_vcpu.guest_cr[2] = val;
+ current->arch.hvm.guest_cr[2] = val;
rc = X86EMUL_OKAY;
break;
@@ -2280,7 +2280,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
const struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
struct vcpu *curr = current;
uint32_t new_intr_shadow;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
hvm_emulate_init_per_insn(hvmemul_ctxt, vio->mmio_insn,
@@ -2410,7 +2410,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
break;
case EMUL_KIND_SET_CONTEXT_INSN: {
struct vcpu *curr = current;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
BUILD_BUG_ON(sizeof(vio->mmio_insn) !=
sizeof(curr->arch.vm_event->emul.insn.data));
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 8090699..cbd1efb 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -581,7 +581,7 @@ static int hpet_save(struct domain *d, hvm_domain_context_t *h)
return 0;
write_lock(&hp->lock);
- guest_time = (v->arch.hvm_vcpu.guest_time ?: hvm_get_guest_time(v)) /
+ guest_time = (v->arch.hvm.guest_time ?: hvm_get_guest_time(v)) /
STIME_PER_HPET_TICK;
/* Write the proper value into the main counter */
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f895339..ac067a8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -278,7 +278,7 @@ void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable)
void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
{
if ( !hvm_funcs.get_guest_pat(v, guest_pat) )
- *guest_pat = v->arch.hvm_vcpu.pat_cr;
+ *guest_pat = v->arch.hvm.pat_cr;
}
int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
@@ -303,7 +303,7 @@ int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
}
if ( !hvm_funcs.set_guest_pat(v, guest_pat) )
- v->arch.hvm_vcpu.pat_cr = guest_pat;
+ v->arch.hvm.pat_cr = guest_pat;
return 1;
}
@@ -415,28 +415,26 @@ static void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc)
}
delta_tsc = guest_tsc - tsc;
- v->arch.hvm_vcpu.cache_tsc_offset = delta_tsc;
+ v->arch.hvm.cache_tsc_offset = delta_tsc;
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, at_tsc);
}
#define hvm_set_guest_tsc(v, t) hvm_set_guest_tsc_fixed(v, t, 0)
static void hvm_set_guest_tsc_msr(struct vcpu *v, u64 guest_tsc)
{
- uint64_t tsc_offset = v->arch.hvm_vcpu.cache_tsc_offset;
+ uint64_t tsc_offset = v->arch.hvm.cache_tsc_offset;
hvm_set_guest_tsc(v, guest_tsc);
- v->arch.hvm_vcpu.msr_tsc_adjust += v->arch.hvm_vcpu.cache_tsc_offset
- - tsc_offset;
+ v->arch.hvm.msr_tsc_adjust += v->arch.hvm.cache_tsc_offset - tsc_offset;
}
static void hvm_set_guest_tsc_adjust(struct vcpu *v, u64 tsc_adjust)
{
- v->arch.hvm_vcpu.cache_tsc_offset += tsc_adjust
- - v->arch.hvm_vcpu.msr_tsc_adjust;
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
- v->arch.hvm_vcpu.msr_tsc_adjust = tsc_adjust;
+ v->arch.hvm.cache_tsc_offset += tsc_adjust - v->arch.hvm.msr_tsc_adjust;
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, 0);
+ v->arch.hvm.msr_tsc_adjust = tsc_adjust;
}
u64 hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc)
@@ -455,7 +453,7 @@ u64 hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc)
tsc = hvm_scale_tsc(v->domain, tsc);
}
- return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
+ return tsc + v->arch.hvm.cache_tsc_offset;
}
void hvm_migrate_timers(struct vcpu *v)
@@ -501,7 +499,7 @@ void hvm_migrate_pirqs(struct vcpu *v)
static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info)
{
- info->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+ info->cr2 = v->arch.hvm.guest_cr[2];
return hvm_funcs.get_pending_event(v, info);
}
@@ -518,14 +516,14 @@ void hvm_do_resume(struct vcpu *v)
hvm_vm_event_do_resume(v);
/* Inject pending hw/sw event */
- if ( v->arch.hvm_vcpu.inject_event.vector >= 0 )
+ if ( v->arch.hvm.inject_event.vector >= 0 )
{
smp_rmb();
if ( !hvm_event_pending(v) )
- hvm_inject_event(&v->arch.hvm_vcpu.inject_event);
+ hvm_inject_event(&v->arch.hvm.inject_event);
- v->arch.hvm_vcpu.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
+ v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
}
if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
@@ -741,7 +739,7 @@ static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
for_each_vcpu ( d, v )
{
- ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
+ ctxt.tsc_adjust = v->arch.hvm.msr_tsc_adjust;
err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
if ( err )
break;
@@ -766,7 +764,7 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
if ( hvm_load_entry(TSC_ADJUST, h, &ctxt) != 0 )
return -EINVAL;
- v->arch.hvm_vcpu.msr_tsc_adjust = ctxt.tsc_adjust;
+ v->arch.hvm.msr_tsc_adjust = ctxt.tsc_adjust;
return 0;
}
@@ -1044,7 +1042,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
if ( hvm_funcs.tsc_scaling.setup )
hvm_funcs.tsc_scaling.setup(v);
- v->arch.hvm_vcpu.msr_tsc_aux = ctxt.msr_tsc_aux;
+ v->arch.hvm.msr_tsc_aux = ctxt.msr_tsc_aux;
hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm.sync_tsc);
@@ -1501,8 +1499,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
hvm_asid_flush_vcpu(v);
- spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
- INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
+ spin_lock_init(&v->arch.hvm.tm_lock);
+ INIT_LIST_HEAD(&v->arch.hvm.tm_list);
rc = hvm_vcpu_cacheattr_init(v); /* teardown: vcpu_cacheattr_destroy */
if ( rc != 0 )
@@ -1517,11 +1515,11 @@ int hvm_vcpu_initialise(struct vcpu *v)
goto fail3;
softirq_tasklet_init(
- &v->arch.hvm_vcpu.assert_evtchn_irq_tasklet,
+ &v->arch.hvm.assert_evtchn_irq_tasklet,
(void(*)(unsigned long))hvm_assert_evtchn_irq,
(unsigned long)v);
- v->arch.hvm_vcpu.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
+ v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
if ( rc != 0 )
@@ -1574,7 +1572,7 @@ void hvm_vcpu_destroy(struct vcpu *v)
free_compat_arg_xlat(v);
- tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
+ tasklet_kill(&v->arch.hvm.assert_evtchn_irq_tasklet);
hvm_funcs.vcpu_destroy(v);
vlapic_destroy(v);
@@ -1967,11 +1965,11 @@ int hvm_set_efer(uint64_t value)
{
printk(XENLOG_G_WARNING
"%pv: Invalid EFER update: %#"PRIx64" -> %#"PRIx64" - %s\n",
- v, v->arch.hvm_vcpu.guest_efer, value, errstr);
+ v, v->arch.hvm.guest_efer, value, errstr);
return X86EMUL_EXCEPTION;
}
- if ( ((value ^ v->arch.hvm_vcpu.guest_efer) & EFER_LME) &&
+ if ( ((value ^ v->arch.hvm.guest_efer) & EFER_LME) &&
hvm_paging_enabled(v) )
{
gdprintk(XENLOG_WARNING,
@@ -1979,7 +1977,7 @@ int hvm_set_efer(uint64_t value)
return X86EMUL_EXCEPTION;
}
- if ( (value & EFER_LME) && !(v->arch.hvm_vcpu.guest_efer & EFER_LME) )
+ if ( (value & EFER_LME) && !(v->arch.hvm.guest_efer & EFER_LME) )
{
struct segment_register cs;
@@ -2005,15 +2003,15 @@ int hvm_set_efer(uint64_t value)
if ( nestedhvm_enabled(v->domain) && cpu_has_svm &&
((value & EFER_SVME) == 0 ) &&
- ((value ^ v->arch.hvm_vcpu.guest_efer) & EFER_SVME) )
+ ((value ^ v->arch.hvm.guest_efer) & EFER_SVME) )
{
/* Cleared EFER.SVME: Flush all nestedp2m tables */
p2m_flush_nestedp2m(v->domain);
nestedhvm_vcpu_reset(v);
}
- value |= v->arch.hvm_vcpu.guest_efer & EFER_LMA;
- v->arch.hvm_vcpu.guest_efer = value;
+ value |= v->arch.hvm.guest_efer & EFER_LMA;
+ v->arch.hvm.guest_efer = value;
hvm_update_guest_efer(v);
return X86EMUL_OKAY;
@@ -2029,7 +2027,7 @@ static bool_t domain_exit_uc_mode(struct vcpu *v)
{
if ( (vs == v) || !vs->is_initialised )
continue;
- if ( (vs->arch.hvm_vcpu.cache_mode == NO_FILL_CACHE_MODE) ||
+ if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
mtrr_pat_not_equal(vs, v) )
return 0;
}
@@ -2097,7 +2095,7 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
case 2:
case 3:
case 4:
- val = curr->arch.hvm_vcpu.guest_cr[cr];
+ val = curr->arch.hvm.guest_cr[cr];
break;
case 8:
val = (vlapic_get_reg(vcpu_vlapic(curr), APIC_TASKPRI) & 0xf0) >> 4;
@@ -2124,7 +2122,7 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
{
/* Entering no fill cache mode. */
spin_lock(&v->domain->arch.hvm.uc_lock);
- v->arch.hvm_vcpu.cache_mode = NO_FILL_CACHE_MODE;
+ v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
if ( !v->domain->arch.hvm.is_in_uc_mode )
{
@@ -2139,11 +2137,11 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
spin_unlock(&v->domain->arch.hvm.uc_lock);
}
else if ( !(value & X86_CR0_CD) &&
- (v->arch.hvm_vcpu.cache_mode == NO_FILL_CACHE_MODE) )
+ (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
{
/* Exit from no fill cache mode. */
spin_lock(&v->domain->arch.hvm.uc_lock);
- v->arch.hvm_vcpu.cache_mode = NORMAL_CACHE_MODE;
+ v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
if ( domain_exit_uc_mode(v) )
hvm_set_uc_mode(v, 0);
@@ -2154,7 +2152,7 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
{
- v->arch.hvm_vcpu.guest_cr[cr] = value;
+ v->arch.hvm.guest_cr[cr] = value;
nestedhvm_set_cr(v, cr, value);
hvm_update_guest_cr(v, cr);
}
@@ -2163,7 +2161,7 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
{
struct vcpu *v = current;
struct domain *d = v->domain;
- unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
+ unsigned long gfn, old_value = v->arch.hvm.guest_cr[0];
struct page_info *page;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
@@ -2202,28 +2200,28 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
{
- if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
+ if ( v->arch.hvm.guest_efer & EFER_LME )
{
- if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) &&
+ if ( !(v->arch.hvm.guest_cr[4] & X86_CR4_PAE) &&
!nestedhvm_vmswitch_in_progress(v) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
return X86EMUL_EXCEPTION;
}
HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
- v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
+ v->arch.hvm.guest_efer |= EFER_LMA;
hvm_update_guest_efer(v);
}
if ( !paging_mode_hap(d) )
{
/* The guest CR3 must be pointing to the guest physical. */
- gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
+ gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page )
{
gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx\n",
- v->arch.hvm_vcpu.guest_cr[3]);
+ v->arch.hvm.guest_cr[3]);
domain_crash(d);
return X86EMUL_UNHANDLEABLE;
}
@@ -2232,7 +2230,7 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
v->arch.guest_table = pagetable_from_page(page);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
- v->arch.hvm_vcpu.guest_cr[3], mfn_x(page_to_mfn(page)));
+ v->arch.hvm.guest_cr[3], mfn_x(page_to_mfn(page)));
}
}
else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
@@ -2247,7 +2245,7 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
/* When CR0.PG is cleared, LMA is cleared immediately. */
if ( hvm_long_mode_active(v) )
{
- v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
+ v->arch.hvm.guest_efer &= ~EFER_LMA;
hvm_update_guest_efer(v);
}
@@ -2281,7 +2279,7 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
{
struct vcpu *v = current;
struct page_info *page;
- unsigned long old = v->arch.hvm_vcpu.guest_cr[3];
+ unsigned long old = v->arch.hvm.guest_cr[3];
bool noflush = false;
if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
@@ -2306,7 +2304,7 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
}
if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
- (value != v->arch.hvm_vcpu.guest_cr[3]) )
+ (value != v->arch.hvm.guest_cr[3]) )
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
@@ -2321,7 +2319,7 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
}
- v->arch.hvm_vcpu.guest_cr[3] = value;
+ v->arch.hvm.guest_cr[3] = value;
paging_update_cr3(v, noflush);
return X86EMUL_OKAY;
@@ -2354,11 +2352,11 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
}
}
- old_cr = v->arch.hvm_vcpu.guest_cr[4];
+ old_cr = v->arch.hvm.guest_cr[4];
if ( (value & X86_CR4_PCIDE) && !(old_cr & X86_CR4_PCIDE) &&
(!hvm_long_mode_active(v) ||
- (v->arch.hvm_vcpu.guest_cr[3] & 0xfff)) )
+ (v->arch.hvm.guest_cr[3] & 0xfff)) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to change CR4.PCIDE from "
"0 to 1 while either EFER.LMA=0 or CR3[11:0]!=000H");
@@ -2441,7 +2439,7 @@ bool_t hvm_virtual_to_linear_addr(
*/
ASSERT(seg < x86_seg_none);
- if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
+ if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PE) ||
(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
{
/*
@@ -3050,7 +3048,7 @@ void hvm_task_switch(
tr.type = 0xb; /* busy 32-bit tss */
hvm_set_segment_register(v, x86_seg_tr, &tr);
- v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_TS;
+ v->arch.hvm.guest_cr[0] |= X86_CR0_TS;
hvm_update_guest_cr(v, 0);
if ( (taskswitch_reason == TSW_iret ||
@@ -3392,8 +3390,8 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
uint64_t *var_range_base, *fixed_range_base;
int ret;
- var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
- fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
+ var_range_base = (uint64_t *)v->arch.hvm.mtrr.var_ranges;
+ fixed_range_base = (uint64_t *)v->arch.hvm.mtrr.fixed_ranges;
if ( (ret = guest_rdmsr(v, msr, msr_content)) != X86EMUL_UNHANDLEABLE )
return ret;
@@ -3405,7 +3403,7 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
unsigned int index;
case MSR_EFER:
- *msr_content = v->arch.hvm_vcpu.guest_efer;
+ *msr_content = v->arch.hvm.guest_efer;
break;
case MSR_IA32_TSC:
@@ -3413,7 +3411,7 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
break;
case MSR_IA32_TSC_ADJUST:
- *msr_content = v->arch.hvm_vcpu.msr_tsc_adjust;
+ *msr_content = v->arch.hvm.msr_tsc_adjust;
break;
case MSR_TSC_AUX:
@@ -3440,14 +3438,14 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
case MSR_MTRRcap:
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
- *msr_content = v->arch.hvm_vcpu.mtrr.mtrr_cap;
+ *msr_content = v->arch.hvm.mtrr.mtrr_cap;
break;
case MSR_MTRRdefType:
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
- *msr_content = v->arch.hvm_vcpu.mtrr.def_type |
- MASK_INSR(v->arch.hvm_vcpu.mtrr.enabled, MTRRdefType_E) |
- MASK_INSR(v->arch.hvm_vcpu.mtrr.fixed_enabled,
+ *msr_content = v->arch.hvm.mtrr.def_type |
+ MASK_INSR(v->arch.hvm.mtrr.enabled, MTRRdefType_E) |
+ MASK_INSR(v->arch.hvm.mtrr.fixed_enabled,
MTRRdefType_FE);
break;
case MSR_MTRRfix64K_00000:
@@ -3473,7 +3471,7 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
goto gp_fault;
index = msr - MSR_IA32_MTRR_PHYSBASE(0);
if ( (index / 2) >=
- MASK_EXTR(v->arch.hvm_vcpu.mtrr.mtrr_cap, MTRRcap_VCNT) )
+ MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap, MTRRcap_VCNT) )
goto gp_fault;
*msr_content = var_range_base[index];
break;
@@ -3481,7 +3479,7 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
case MSR_IA32_XSS:
if ( !d->arch.cpuid->xstate.xsaves )
goto gp_fault;
- *msr_content = v->arch.hvm_vcpu.msr_xss;
+ *msr_content = v->arch.hvm.msr_xss;
break;
case MSR_IA32_BNDCFGS:
@@ -3573,7 +3571,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
break;
case MSR_TSC_AUX:
- v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content;
+ v->arch.hvm.msr_tsc_aux = (uint32_t)msr_content;
if ( cpu_has_rdtscp
&& (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) )
wrmsr_tsc_aux(msr_content);
@@ -3604,14 +3602,14 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
case MSR_MTRRdefType:
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
- if ( !mtrr_def_type_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
+ if ( !mtrr_def_type_msr_set(v->domain, &v->arch.hvm.mtrr,
msr_content) )
goto gp_fault;
break;
case MSR_MTRRfix64K_00000:
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
- if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr, 0,
+ if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm.mtrr, 0,
msr_content) )
goto gp_fault;
break;
@@ -3620,7 +3618,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix16K_80000 + 1;
- if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
+ if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm.mtrr,
index, msr_content) )
goto gp_fault;
break;
@@ -3628,7 +3626,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix4K_C0000 + 3;
- if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
+ if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm.mtrr,
index, msr_content) )
goto gp_fault;
break;
@@ -3637,8 +3635,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
goto gp_fault;
index = msr - MSR_IA32_MTRR_PHYSBASE(0);
if ( ((index / 2) >=
- MASK_EXTR(v->arch.hvm_vcpu.mtrr.mtrr_cap, MTRRcap_VCNT)) ||
- !mtrr_var_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
+ MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap, MTRRcap_VCNT)) ||
+ !mtrr_var_range_msr_set(v->domain, &v->arch.hvm.mtrr,
msr, msr_content) )
goto gp_fault;
break;
@@ -3647,7 +3645,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
/* No XSS features currently supported for guests. */
if ( !d->arch.cpuid->xstate.xsaves || msr_content != 0 )
goto gp_fault;
- v->arch.hvm_vcpu.msr_xss = msr_content;
+ v->arch.hvm.msr_xss = msr_content;
break;
case MSR_IA32_BNDCFGS:
@@ -3872,7 +3870,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
if ( !paging_mode_hap(d) )
{
- if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
+ if ( v->arch.hvm.guest_cr[0] & X86_CR0_PG )
put_page(pagetable_get_page(v->arch.guest_table));
v->arch.guest_table = pagetable_null();
}
@@ -3888,19 +3886,19 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
v->arch.user_regs.rip = ip;
memset(&v->arch.debugreg, 0, sizeof(v->arch.debugreg));
- v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
+ v->arch.hvm.guest_cr[0] = X86_CR0_ET;
hvm_update_guest_cr(v, 0);
- v->arch.hvm_vcpu.guest_cr[2] = 0;
+ v->arch.hvm.guest_cr[2] = 0;
hvm_update_guest_cr(v, 2);
- v->arch.hvm_vcpu.guest_cr[3] = 0;
+ v->arch.hvm.guest_cr[3] = 0;
hvm_update_guest_cr(v, 3);
- v->arch.hvm_vcpu.guest_cr[4] = 0;
+ v->arch.hvm.guest_cr[4] = 0;
hvm_update_guest_cr(v, 4);
- v->arch.hvm_vcpu.guest_efer = 0;
+ v->arch.hvm.guest_efer = 0;
hvm_update_guest_efer(v);
reg.sel = cs;
@@ -3932,12 +3930,12 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
hvm_funcs.tsc_scaling.setup(v);
/* Sync AP's TSC with BSP's. */
- v->arch.hvm_vcpu.cache_tsc_offset =
- v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+ v->arch.hvm.cache_tsc_offset =
+ v->domain->vcpu[0]->arch.hvm.cache_tsc_offset;
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset,
d->arch.hvm.sync_tsc);
- v->arch.hvm_vcpu.msr_tsc_adjust = 0;
+ v->arch.hvm.msr_tsc_adjust = 0;
paging_update_paging_modes(v);
@@ -4059,7 +4057,7 @@ static int hvmop_set_evtchn_upcall_vector(
printk(XENLOG_G_INFO "%pv: upcall vector %02x\n", v, op.vector);
- v->arch.hvm_vcpu.evtchn_upcall_vector = op.vector;
+ v->arch.hvm.evtchn_upcall_vector = op.vector;
hvm_assert_evtchn_irq(v);
return 0;
}
@@ -4976,7 +4974,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
break;
rc = 0;
vcpu_pause(v);
- v->arch.hvm_vcpu.single_step =
+ v->arch.hvm.single_step =
(op == XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON);
vcpu_unpause(v); /* guest will latch new state */
break;
@@ -4995,7 +4993,7 @@ void hvm_toggle_singlestep(struct vcpu *v)
if ( !hvm_is_singlestep_supported() )
return;
- v->arch.hvm_vcpu.single_step = !v->arch.hvm_vcpu.single_step;
+ v->arch.hvm.single_step = !v->arch.hvm.single_step;
}
void hvm_domain_soft_reset(struct domain *d)
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index f1ea7d7..47d6c85 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -82,7 +82,7 @@ bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)
{
struct hvm_emulate_ctxt ctxt;
struct vcpu *curr = current;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());
@@ -118,7 +118,7 @@ bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)
bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
struct npfec access)
{
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
vio->mmio_access = access.gla_valid &&
access.kind == npfec_kind_with_gla
@@ -131,7 +131,7 @@ bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
bool handle_pio(uint16_t port, unsigned int size, int dir)
{
struct vcpu *curr = current;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
unsigned long data;
int rc;
@@ -180,7 +180,7 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
{
struct vcpu *curr = current;
const struct hvm_domain *hvm = &curr->domain->arch.hvm;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
struct g2m_ioport *g2m_ioport;
unsigned int start, end;
@@ -201,7 +201,7 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
static int g2m_portio_read(const struct hvm_io_handler *handler,
uint64_t addr, uint32_t size, uint64_t *data)
{
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;
@@ -226,7 +226,7 @@ static int g2m_portio_read(const struct hvm_io_handler *handler,
static int g2m_portio_write(const struct hvm_io_handler *handler,
uint64_t addr, uint32_t size, uint64_t data)
{
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 8d60b02..138ed69 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -110,7 +110,7 @@ bool hvm_io_pending(struct vcpu *v)
static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
{
struct vcpu *v = sv->vcpu;
- ioreq_t *ioreq = &v->arch.hvm_vcpu.hvm_io.io_req;
+ ioreq_t *ioreq = &v->arch.hvm.hvm_io.io_req;
if ( hvm_ioreq_needs_completion(ioreq) )
{
@@ -184,7 +184,7 @@ static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
bool handle_hvm_io_completion(struct vcpu *v)
{
struct domain *d = v->domain;
- struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
struct hvm_ioreq_server *s;
enum hvm_io_completion io_completion;
unsigned int id;
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 1ded2c2..fe2c2fa 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -306,13 +306,13 @@ void hvm_assert_evtchn_irq(struct vcpu *v)
{
if ( unlikely(in_irq() || !local_irq_is_enabled()) )
{
- tasklet_schedule(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
+ tasklet_schedule(&v->arch.hvm.assert_evtchn_irq_tasklet);
return;
}
- if ( v->arch.hvm_vcpu.evtchn_upcall_vector != 0 )
+ if ( v->arch.hvm.evtchn_upcall_vector != 0 )
{
- uint8_t vector = v->arch.hvm_vcpu.evtchn_upcall_vector;
+ uint8_t vector = v->arch.hvm.evtchn_upcall_vector;
vlapic_set_irq(vcpu_vlapic(v), vector, 0);
}
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 8a772bc..de1b5c4 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -122,7 +122,7 @@ uint8_t pat_type_2_pte_flags(uint8_t pat_type)
int hvm_vcpu_cacheattr_init(struct vcpu *v)
{
- struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;
+ struct mtrr_state *m = &v->arch.hvm.mtrr;
unsigned int num_var_ranges =
is_hardware_domain(v->domain) ? MASK_EXTR(mtrr_state.mtrr_cap,
MTRRcap_VCNT)
@@ -144,7 +144,7 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v)
m->mtrr_cap = (1u << 10) | (1u << 8) | num_var_ranges;
- v->arch.hvm_vcpu.pat_cr =
+ v->arch.hvm.pat_cr =
((uint64_t)PAT_TYPE_WRBACK) | /* PAT0: WB */
((uint64_t)PAT_TYPE_WRTHROUGH << 8) | /* PAT1: WT */
((uint64_t)PAT_TYPE_UC_MINUS << 16) | /* PAT2: UC- */
@@ -185,7 +185,7 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v)
void hvm_vcpu_cacheattr_destroy(struct vcpu *v)
{
- xfree(v->arch.hvm_vcpu.mtrr.var_ranges);
+ xfree(v->arch.hvm.mtrr.var_ranges);
}
/*
@@ -343,8 +343,8 @@ uint32_t get_pat_flags(struct vcpu *v,
uint8_t guest_eff_mm_type;
uint8_t shadow_mtrr_type;
uint8_t pat_entry_value;
- uint64_t pat = v->arch.hvm_vcpu.pat_cr;
- struct mtrr_state *g = &v->arch.hvm_vcpu.mtrr;
+ uint64_t pat = v->arch.hvm.pat_cr;
+ struct mtrr_state *g = &v->arch.hvm.mtrr;
/* 1. Get the effective memory type of guest physical address,
* with the pair of guest MTRR and PAT
@@ -494,8 +494,8 @@ bool_t mtrr_var_range_msr_set(
bool mtrr_pat_not_equal(const struct vcpu *vd, const struct vcpu *vs)
{
- const struct mtrr_state *md = &vd->arch.hvm_vcpu.mtrr;
- const struct mtrr_state *ms = &vs->arch.hvm_vcpu.mtrr;
+ const struct mtrr_state *md = &vd->arch.hvm.mtrr;
+ const struct mtrr_state *ms = &vs->arch.hvm.mtrr;
if ( md->enabled != ms->enabled )
return true;
@@ -525,7 +525,7 @@ bool mtrr_pat_not_equal(const struct vcpu *vd, const struct vcpu *vs)
}
/* Test PAT. */
- return vd->arch.hvm_vcpu.pat_cr != vs->arch.hvm_vcpu.pat_cr;
+ return vd->arch.hvm.pat_cr != vs->arch.hvm.pat_cr;
}
struct hvm_mem_pinned_cacheattr_range {
@@ -697,7 +697,7 @@ static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
/* save mtrr&pat */
for_each_vcpu(d, v)
{
- const struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr;
+ const struct mtrr_state *mtrr_state = &v->arch.hvm.mtrr;
struct hvm_hw_mtrr hw_mtrr = {
.msr_mtrr_def_type = mtrr_state->def_type |
MASK_INSR(mtrr_state->fixed_enabled,
@@ -764,7 +764,7 @@ static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
return -EINVAL;
}
- mtrr_state = &v->arch.hvm_vcpu.mtrr;
+ mtrr_state = &v->arch.hvm.mtrr;
hvm_set_guest_pat(v, hw_mtrr.msr_pat_cr);
@@ -858,7 +858,7 @@ int epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
return -1;
gmtrr_mtype = is_hvm_domain(d) && v ?
- get_mtrr_type(&v->arch.hvm_vcpu.mtrr,
+ get_mtrr_type(&v->arch.hvm.mtrr,
gfn << PAGE_SHIFT, order) :
MTRR_TYPE_WRBACK;
hmtrr_mtype = get_mtrr_type(&mtrr_state, mfn_x(mfn) << PAGE_SHIFT, order);
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 75b9408..8542a32 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -265,7 +265,7 @@ static int acpi_save(struct domain *d, hvm_domain_context_t *h)
* Update the counter to the guest's current time. Make sure it only
* goes forwards.
*/
- x = (((s->vcpu->arch.hvm_vcpu.guest_time ?: hvm_get_guest_time(s->vcpu)) -
+ x = (((s->vcpu->arch.hvm.guest_time ?: hvm_get_guest_time(s->vcpu)) -
s->last_gtime) * s->scale) >> 32;
if ( x < 1UL<<31 )
acpi->tmr_val += x;
diff --git a/xen/arch/x86/hvm/svm/asid.c b/xen/arch/x86/hvm/svm/asid.c
index 4861daa..7cc54da 100644
--- a/xen/arch/x86/hvm/svm/asid.c
+++ b/xen/arch/x86/hvm/svm/asid.c
@@ -43,7 +43,7 @@ void svm_asid_handle_vmrun(void)
struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
struct hvm_vcpu_asid *p_asid =
nestedhvm_vcpu_in_guestmode(curr)
- ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm_vcpu.n1asid;
+ ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm.n1asid;
bool_t need_flush = hvm_asid_handle_vmenter(p_asid);
/* ASID 0 indicates that ASIDs are disabled. */
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 6457532..a1f840e 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -243,10 +243,10 @@ static int nsvm_vcpu_hostsave(struct vcpu *v, unsigned int inst_len)
/* Save shadowed values. This ensures that the l1 guest
* cannot override them to break out. */
- n1vmcb->_efer = v->arch.hvm_vcpu.guest_efer;
- n1vmcb->_cr0 = v->arch.hvm_vcpu.guest_cr[0];
- n1vmcb->_cr2 = v->arch.hvm_vcpu.guest_cr[2];
- n1vmcb->_cr4 = v->arch.hvm_vcpu.guest_cr[4];
+ n1vmcb->_efer = v->arch.hvm.guest_efer;
+ n1vmcb->_cr0 = v->arch.hvm.guest_cr[0];
+ n1vmcb->_cr2 = v->arch.hvm.guest_cr[2];
+ n1vmcb->_cr4 = v->arch.hvm.guest_cr[4];
/* Remember the host interrupt flag */
svm->ns_hostflags.fields.rflagsif =
@@ -276,7 +276,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
v->arch.hvm_svm.vmcb_pa = nv->nv_n1vmcx_pa;
/* EFER */
- v->arch.hvm_vcpu.guest_efer = n1vmcb->_efer;
+ v->arch.hvm.guest_efer = n1vmcb->_efer;
rc = hvm_set_efer(n1vmcb->_efer);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -284,7 +284,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
gdprintk(XENLOG_ERR, "hvm_set_efer failed, rc: %u\n", rc);
/* CR4 */
- v->arch.hvm_vcpu.guest_cr[4] = n1vmcb->_cr4;
+ v->arch.hvm.guest_cr[4] = n1vmcb->_cr4;
rc = hvm_set_cr4(n1vmcb->_cr4, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -293,28 +293,28 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
/* CR0 */
nestedsvm_fpu_vmexit(n1vmcb, n2vmcb,
- svm->ns_cr0, v->arch.hvm_vcpu.guest_cr[0]);
- v->arch.hvm_vcpu.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
+ svm->ns_cr0, v->arch.hvm.guest_cr[0]);
+ v->arch.hvm.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
n1vmcb->rflags &= ~X86_EFLAGS_VM;
rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
- svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
+ svm->ns_cr0 = v->arch.hvm.guest_cr[0];
/* CR2 */
- v->arch.hvm_vcpu.guest_cr[2] = n1vmcb->_cr2;
+ v->arch.hvm.guest_cr[2] = n1vmcb->_cr2;
hvm_update_guest_cr(v, 2);
/* CR3 */
/* Nested paging mode */
if (nestedhvm_paging_mode_hap(v)) {
/* host nested paging + guest nested paging. */
- /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
+ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
} else if (paging_mode_hap(v->domain)) {
/* host nested paging + guest shadow paging. */
- /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
+ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
} else {
/* host shadow paging + guest shadow paging. */
@@ -322,7 +322,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
if (!pagetable_is_null(v->arch.guest_table))
put_page(pagetable_get_page(v->arch.guest_table));
v->arch.guest_table = pagetable_null();
- /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
+ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
}
rc = hvm_set_cr3(n1vmcb->_cr3, 1);
if ( rc == X86EMUL_EXCEPTION )
@@ -549,7 +549,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
}
/* EFER */
- v->arch.hvm_vcpu.guest_efer = ns_vmcb->_efer;
+ v->arch.hvm.guest_efer = ns_vmcb->_efer;
rc = hvm_set_efer(ns_vmcb->_efer);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -557,7 +557,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
gdprintk(XENLOG_ERR, "hvm_set_efer failed, rc: %u\n", rc);
/* CR4 */
- v->arch.hvm_vcpu.guest_cr[4] = ns_vmcb->_cr4;
+ v->arch.hvm.guest_cr[4] = ns_vmcb->_cr4;
rc = hvm_set_cr4(ns_vmcb->_cr4, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -565,9 +565,9 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
/* CR0 */
- svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
+ svm->ns_cr0 = v->arch.hvm.guest_cr[0];
cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
- v->arch.hvm_vcpu.guest_cr[0] = ns_vmcb->_cr0;
+ v->arch.hvm.guest_cr[0] = ns_vmcb->_cr0;
rc = hvm_set_cr0(cr0, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -575,7 +575,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
/* CR2 */
- v->arch.hvm_vcpu.guest_cr[2] = ns_vmcb->_cr2;
+ v->arch.hvm.guest_cr[2] = ns_vmcb->_cr2;
hvm_update_guest_cr(v, 2);
/* Nested paging mode */
@@ -585,7 +585,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
- /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
+ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -599,7 +599,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
/* When l1 guest does shadow paging
* we assume it intercepts page faults.
*/
- /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
+ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -1259,7 +1259,7 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
* Delay the injection because this would result in delivering
* an interrupt *within* the execution of an instruction.
*/
- if ( v->arch.hvm_vcpu.hvm_io.io_req.state != STATE_IOREQ_NONE )
+ if ( v->arch.hvm.hvm_io.io_req.state != STATE_IOREQ_NONE )
return hvm_intblk_shadow;
if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) {
@@ -1681,7 +1681,7 @@ void svm_nested_features_on_efer_update(struct vcpu *v)
* Need state for transfering the nested gif status so only write on
* the hvm_vcpu EFER.SVME changing.
*/
- if ( v->arch.hvm_vcpu.guest_efer & EFER_SVME )
+ if ( v->arch.hvm.guest_efer & EFER_SVME )
{
if ( !vmcb->virt_ext.fields.vloadsave_enable &&
paging_mode_hap(v->domain) &&
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 2d52247..92b29b1 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -191,13 +191,13 @@ static void svm_set_icebp_interception(struct domain *d, bool enable)
static void svm_save_dr(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- unsigned int flag_dr_dirty = v->arch.hvm_vcpu.flag_dr_dirty;
+ unsigned int flag_dr_dirty = v->arch.hvm.flag_dr_dirty;
if ( !flag_dr_dirty )
return;
/* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
- v->arch.hvm_vcpu.flag_dr_dirty = 0;
+ v->arch.hvm.flag_dr_dirty = 0;
vmcb_set_dr_intercepts(vmcb, ~0u);
if ( v->domain->arch.cpuid->extd.dbext )
@@ -223,10 +223,10 @@ static void svm_save_dr(struct vcpu *v)
static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
{
- if ( v->arch.hvm_vcpu.flag_dr_dirty )
+ if ( v->arch.hvm.flag_dr_dirty )
return;
- v->arch.hvm_vcpu.flag_dr_dirty = 1;
+ v->arch.hvm.flag_dr_dirty = 1;
vmcb_set_dr_intercepts(vmcb, 0);
ASSERT(v == current);
@@ -269,10 +269,10 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
- c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
- c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
- c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
+ c->cr0 = v->arch.hvm.guest_cr[0];
+ c->cr2 = v->arch.hvm.guest_cr[2];
+ c->cr3 = v->arch.hvm.guest_cr[3];
+ c->cr4 = v->arch.hvm.guest_cr[4];
c->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs;
c->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp;
@@ -330,17 +330,17 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
}
}
- if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
+ if ( v->arch.hvm.guest_cr[0] & X86_CR0_PG )
put_page(pagetable_get_page(v->arch.guest_table));
v->arch.guest_table =
page ? pagetable_from_page(page) : pagetable_null();
}
- v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
- v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
- v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
- v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+ v->arch.hvm.guest_cr[0] = c->cr0 | X86_CR0_ET;
+ v->arch.hvm.guest_cr[2] = c->cr2;
+ v->arch.hvm.guest_cr[3] = c->cr3;
+ v->arch.hvm.guest_cr[4] = c->cr4;
svm_update_guest_cr(v, 0, 0);
svm_update_guest_cr(v, 2, 0);
svm_update_guest_cr(v, 4, 0);
@@ -384,7 +384,7 @@ static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
data->msr_star = vmcb->star;
data->msr_cstar = vmcb->cstar;
data->msr_syscall_mask = vmcb->sfmask;
- data->msr_efer = v->arch.hvm_vcpu.guest_efer;
+ data->msr_efer = v->arch.hvm.guest_efer;
data->msr_flags = 0;
}
@@ -398,7 +398,7 @@ static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
vmcb->star = data->msr_star;
vmcb->cstar = data->msr_cstar;
vmcb->sfmask = data->msr_syscall_mask;
- v->arch.hvm_vcpu.guest_efer = data->msr_efer;
+ v->arch.hvm.guest_efer = data->msr_efer;
svm_update_guest_efer(v);
}
@@ -509,7 +509,7 @@ static void svm_fpu_leave(struct vcpu *v)
* then this is not necessary: no FPU activity can occur until the guest
* clears CR0.TS, and we will initialise the FPU when that happens.
*/
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
{
vmcb_set_exception_intercepts(
n1vmcb,
@@ -550,7 +550,7 @@ static int svm_guest_x86_mode(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
+ if ( unlikely(!(v->arch.hvm.guest_cr[0] & X86_CR0_PE)) )
return 0;
if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
@@ -569,7 +569,7 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags)
case 0: {
unsigned long hw_cr0_mask = 0;
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
{
if ( v != current )
{
@@ -590,17 +590,17 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags)
vmcb_set_cr_intercepts(vmcb, intercepts |
CR_INTERCEPT_CR3_WRITE);
}
- value = v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
+ value = v->arch.hvm.guest_cr[0] | hw_cr0_mask;
if ( !paging_mode_hap(v->domain) )
value |= X86_CR0_PG | X86_CR0_WP;
vmcb_set_cr0(vmcb, value);
break;
}
case 2:
- vmcb_set_cr2(vmcb, v->arch.hvm_vcpu.guest_cr[2]);
+ vmcb_set_cr2(vmcb, v->arch.hvm.guest_cr[2]);
break;
case 3:
- vmcb_set_cr3(vmcb, v->arch.hvm_vcpu.hw_cr[3]);
+ vmcb_set_cr3(vmcb, v->arch.hvm.hw_cr[3]);
if ( !nestedhvm_enabled(v->domain) )
{
if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
@@ -611,13 +611,13 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags)
else if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
hvm_asid_flush_vcpu_asid(
nestedhvm_vcpu_in_guestmode(v)
- ? &vcpu_nestedhvm(v).nv_n2asid : &v->arch.hvm_vcpu.n1asid);
+ ? &vcpu_nestedhvm(v).nv_n2asid : &v->arch.hvm.n1asid);
break;
case 4:
value = HVM_CR4_HOST_MASK;
if ( paging_mode_hap(v->domain) )
value &= ~X86_CR4_PAE;
- value |= v->arch.hvm_vcpu.guest_cr[4];
+ value |= v->arch.hvm.guest_cr[4];
if ( !hvm_paging_enabled(v) )
{
@@ -646,16 +646,16 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags)
static void svm_update_guest_efer(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- bool_t lma = !!(v->arch.hvm_vcpu.guest_efer & EFER_LMA);
+ bool_t lma = !!(v->arch.hvm.guest_efer & EFER_LMA);
uint64_t new_efer;
- new_efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
+ new_efer = (v->arch.hvm.guest_efer | EFER_SVME) & ~EFER_LME;
if ( lma )
new_efer |= EFER_LME;
vmcb_set_efer(vmcb, new_efer);
ASSERT(nestedhvm_enabled(v->domain) ||
- !(v->arch.hvm_vcpu.guest_efer & EFER_SVME));
+ !(v->arch.hvm.guest_efer & EFER_SVME));
if ( nestedhvm_enabled(v->domain) )
svm_nested_features_on_efer_update(v);
@@ -1140,11 +1140,11 @@ static void noreturn svm_do_resume(struct vcpu *v)
vcpu_guestmode = 1;
if ( !vcpu_guestmode &&
- unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
+ unlikely(v->arch.hvm.debug_state_latch != debug_state) )
{
uint32_t intercepts = vmcb_get_exception_intercepts(vmcb);
- v->arch.hvm_vcpu.debug_state_latch = debug_state;
+ v->arch.hvm.debug_state_latch = debug_state;
vmcb_set_exception_intercepts(
vmcb, debug_state ? (intercepts | (1U << TRAP_int3))
: (intercepts & ~(1U << TRAP_int3)));
@@ -1458,7 +1458,7 @@ static void svm_inject_event(const struct x86_event *event)
case TRAP_page_fault:
ASSERT(_event.type == X86_EVENTTYPE_HW_EXCEPTION);
- curr->arch.hvm_vcpu.guest_cr[2] = _event.cr2;
+ curr->arch.hvm.guest_cr[2] = _event.cr2;
vmcb_set_cr2(vmcb, _event.cr2);
break;
}
@@ -1800,14 +1800,14 @@ static void svm_fpu_dirty_intercept(void)
if ( vmcb != n1vmcb )
{
/* Check if l1 guest must make FPU ready for the l2 guest */
- if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS )
+ if ( v->arch.hvm.guest_cr[0] & X86_CR0_TS )
hvm_inject_hw_exception(TRAP_no_device, X86_EVENT_NO_EC);
else
vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) & ~X86_CR0_TS);
return;
}
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) & ~X86_CR0_TS);
}
@@ -2492,7 +2492,7 @@ static void svm_invlpga_intercept(
{
svm_invlpga(vaddr,
(asid == 0)
- ? v->arch.hvm_vcpu.n1asid.asid
+ ? v->arch.hvm.n1asid.asid
: vcpu_nestedhvm(v).nv_n2asid.asid);
}
@@ -2609,8 +2609,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
hvm_invalidate_regs_fields(regs);
if ( paging_mode_hap(v->domain) )
- v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] =
- vmcb_get_cr3(vmcb);
+ v->arch.hvm.guest_cr[3] = v->arch.hvm.hw_cr[3] = vmcb_get_cr3(vmcb);
if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
vcpu_guestmode = 1;
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index d31fcfa..3776c53 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -125,7 +125,7 @@ static int construct_vmcb(struct vcpu *v)
}
/* Guest EFER. */
- v->arch.hvm_vcpu.guest_efer = 0;
+ v->arch.hvm.guest_efer = 0;
hvm_update_guest_efer(v);
/* Guest segment limits. */
@@ -171,10 +171,10 @@ static int construct_vmcb(struct vcpu *v)
vmcb->tr.base = 0;
vmcb->tr.limit = 0xff;
- v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
+ v->arch.hvm.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
hvm_update_guest_cr(v, 0);
- v->arch.hvm_vcpu.guest_cr[4] = 0;
+ v->arch.hvm.guest_cr[4] = 0;
hvm_update_guest_cr(v, 4);
paging_update_paging_modes(v);
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 5ddb41b..e84c4f4 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -326,7 +326,7 @@ static void dump_vp_assist(const struct vcpu *v)
{
const union viridian_vp_assist *va;
- va = &v->arch.hvm_vcpu.viridian.vp_assist.msr;
+ va = &v->arch.hvm.viridian.vp_assist.msr;
printk(XENLOG_G_INFO "%pv: VIRIDIAN VP_ASSIST_PAGE: enabled: %x pfn: %lx\n",
v, va->fields.enabled, (unsigned long)va->fields.pfn);
@@ -380,11 +380,11 @@ static void enable_hypercall_page(struct domain *d)
static void initialize_vp_assist(struct vcpu *v)
{
struct domain *d = v->domain;
- unsigned long gmfn = v->arch.hvm_vcpu.viridian.vp_assist.msr.fields.pfn;
+ unsigned long gmfn = v->arch.hvm.viridian.vp_assist.msr.fields.pfn;
struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
void *va;
- ASSERT(!v->arch.hvm_vcpu.viridian.vp_assist.va);
+ ASSERT(!v->arch.hvm.viridian.vp_assist.va);
/*
* See section 7.8.7 of the specification for details of this
@@ -409,7 +409,7 @@ static void initialize_vp_assist(struct vcpu *v)
clear_page(va);
- v->arch.hvm_vcpu.viridian.vp_assist.va = va;
+ v->arch.hvm.viridian.vp_assist.va = va;
return;
fail:
@@ -419,13 +419,13 @@ static void initialize_vp_assist(struct vcpu *v)
static void teardown_vp_assist(struct vcpu *v)
{
- void *va = v->arch.hvm_vcpu.viridian.vp_assist.va;
+ void *va = v->arch.hvm.viridian.vp_assist.va;
struct page_info *page;
if ( !va )
return;
- v->arch.hvm_vcpu.viridian.vp_assist.va = NULL;
+ v->arch.hvm.viridian.vp_assist.va = NULL;
page = mfn_to_page(domain_page_map_to_mfn(va));
@@ -435,7 +435,7 @@ static void teardown_vp_assist(struct vcpu *v)
void viridian_apic_assist_set(struct vcpu *v)
{
- uint32_t *va = v->arch.hvm_vcpu.viridian.vp_assist.va;
+ uint32_t *va = v->arch.hvm.viridian.vp_assist.va;
if ( !va )
return;
@@ -445,25 +445,25 @@ void viridian_apic_assist_set(struct vcpu *v)
* wrong and the VM will most likely hang so force a crash now
* to make the problem clear.
*/
- if ( v->arch.hvm_vcpu.viridian.vp_assist.pending )
+ if ( v->arch.hvm.viridian.vp_assist.pending )
domain_crash(v->domain);
- v->arch.hvm_vcpu.viridian.vp_assist.pending = true;
+ v->arch.hvm.viridian.vp_assist.pending = true;
*va |= 1u;
}
bool viridian_apic_assist_completed(struct vcpu *v)
{
- uint32_t *va = v->arch.hvm_vcpu.viridian.vp_assist.va;
+ uint32_t *va = v->arch.hvm.viridian.vp_assist.va;
if ( !va )
return false;
- if ( v->arch.hvm_vcpu.viridian.vp_assist.pending &&
+ if ( v->arch.hvm.viridian.vp_assist.pending &&
!(*va & 1u) )
{
/* An EOI has been avoided */
- v->arch.hvm_vcpu.viridian.vp_assist.pending = false;
+ v->arch.hvm.viridian.vp_assist.pending = false;
return true;
}
@@ -472,13 +472,13 @@ bool viridian_apic_assist_completed(struct vcpu *v)
void viridian_apic_assist_clear(struct vcpu *v)
{
- uint32_t *va = v->arch.hvm_vcpu.viridian.vp_assist.va;
+ uint32_t *va = v->arch.hvm.viridian.vp_assist.va;
if ( !va )
return;
*va &= ~1u;
- v->arch.hvm_vcpu.viridian.vp_assist.pending = false;
+ v->arch.hvm.viridian.vp_assist.pending = false;
}
static void update_reference_tsc(struct domain *d, bool_t initialize)
@@ -607,9 +607,9 @@ int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
case HV_X64_MSR_VP_ASSIST_PAGE:
perfc_incr(mshv_wrmsr_apic_msr);
teardown_vp_assist(v); /* release any previous mapping */
- v->arch.hvm_vcpu.viridian.vp_assist.msr.raw = val;
+ v->arch.hvm.viridian.vp_assist.msr.raw = val;
dump_vp_assist(v);
- if ( v->arch.hvm_vcpu.viridian.vp_assist.msr.fields.enabled )
+ if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled )
initialize_vp_assist(v);
break;
@@ -630,10 +630,10 @@ int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
case HV_X64_MSR_CRASH_P3:
case HV_X64_MSR_CRASH_P4:
BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
- ARRAY_SIZE(v->arch.hvm_vcpu.viridian.crash_param));
+ ARRAY_SIZE(v->arch.hvm.viridian.crash_param));
idx -= HV_X64_MSR_CRASH_P0;
- v->arch.hvm_vcpu.viridian.crash_param[idx] = val;
+ v->arch.hvm.viridian.crash_param[idx] = val;
break;
case HV_X64_MSR_CRASH_CTL:
@@ -646,11 +646,11 @@ int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
break;
gprintk(XENLOG_WARNING, "VIRIDIAN CRASH: %lx %lx %lx %lx %lx\n",
- v->arch.hvm_vcpu.viridian.crash_param[0],
- v->arch.hvm_vcpu.viridian.crash_param[1],
- v->arch.hvm_vcpu.viridian.crash_param[2],
- v->arch.hvm_vcpu.viridian.crash_param[3],
- v->arch.hvm_vcpu.viridian.crash_param[4]);
+ v->arch.hvm.viridian.crash_param[0],
+ v->arch.hvm.viridian.crash_param[1],
+ v->arch.hvm.viridian.crash_param[2],
+ v->arch.hvm.viridian.crash_param[3],
+ v->arch.hvm.viridian.crash_param[4]);
break;
}
@@ -752,7 +752,7 @@ int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
case HV_X64_MSR_VP_ASSIST_PAGE:
perfc_incr(mshv_rdmsr_apic_msr);
- *val = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw;
+ *val = v->arch.hvm.viridian.vp_assist.msr.raw;
break;
case HV_X64_MSR_REFERENCE_TSC:
@@ -787,10 +787,10 @@ int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
case HV_X64_MSR_CRASH_P3:
case HV_X64_MSR_CRASH_P4:
BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
- ARRAY_SIZE(v->arch.hvm_vcpu.viridian.crash_param));
+ ARRAY_SIZE(v->arch.hvm.viridian.crash_param));
idx -= HV_X64_MSR_CRASH_P0;
- *val = v->arch.hvm_vcpu.viridian.crash_param[idx];
+ *val = v->arch.hvm.viridian.crash_param[idx];
break;
case HV_X64_MSR_CRASH_CTL:
@@ -1035,8 +1035,8 @@ static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
for_each_vcpu( d, v ) {
struct hvm_viridian_vcpu_context ctxt = {
- .vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw,
- .vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending,
+ .vp_assist_msr = v->arch.hvm.viridian.vp_assist.msr.raw,
+ .vp_assist_pending = v->arch.hvm.viridian.vp_assist.pending,
};
if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
@@ -1065,12 +1065,12 @@ static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
if ( memcmp(&ctxt._pad, zero_page, sizeof(ctxt._pad)) )
return -EINVAL;
- v->arch.hvm_vcpu.viridian.vp_assist.msr.raw = ctxt.vp_assist_msr;
- if ( v->arch.hvm_vcpu.viridian.vp_assist.msr.fields.enabled &&
- !v->arch.hvm_vcpu.viridian.vp_assist.va )
+ v->arch.hvm.viridian.vp_assist.msr.raw = ctxt.vp_assist_msr;
+ if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled &&
+ !v->arch.hvm.viridian.vp_assist.va )
initialize_vp_assist(v);
- v->arch.hvm_vcpu.viridian.vp_assist.pending = !!ctxt.vp_assist_pending;
+ v->arch.hvm.viridian.vp_assist.pending = !!ctxt.vp_assist_pending;
return 0;
}
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index ccbf181..0c5d0cb 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -311,7 +311,7 @@ static int msixtbl_write(struct vcpu *v, unsigned long address,
if ( !(val & PCI_MSIX_VECTOR_BITMASK) &&
test_and_clear_bit(nr_entry, &entry->table_flags) )
{
- v->arch.hvm_vcpu.hvm_io.msix_unmask_address = address;
+ v->arch.hvm.hvm_io.msix_unmask_address = address;
goto out;
}
@@ -383,8 +383,8 @@ static bool_t msixtbl_range(const struct hvm_io_handler *handler,
PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET) &&
!(data & PCI_MSIX_VECTOR_BITMASK) )
{
- curr->arch.hvm_vcpu.hvm_io.msix_snoop_address = addr;
- curr->arch.hvm_vcpu.hvm_io.msix_snoop_gpa = 0;
+ curr->arch.hvm.hvm_io.msix_snoop_address = addr;
+ curr->arch.hvm.hvm_io.msix_snoop_gpa = 0;
}
}
else if ( (size == 4 || size == 8) &&
@@ -401,9 +401,9 @@ static bool_t msixtbl_range(const struct hvm_io_handler *handler,
BUILD_BUG_ON((PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET + 4) &
(PCI_MSIX_ENTRY_SIZE - 1));
- curr->arch.hvm_vcpu.hvm_io.msix_snoop_address =
+ curr->arch.hvm.hvm_io.msix_snoop_address =
addr + size * r->count - 4;
- curr->arch.hvm_vcpu.hvm_io.msix_snoop_gpa =
+ curr->arch.hvm.hvm_io.msix_snoop_gpa =
r->data + size * r->count - 4;
}
}
@@ -506,13 +506,13 @@ int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable)
for_each_vcpu ( d, v )
{
if ( (v->pause_flags & VPF_blocked_in_xen) &&
- !v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa &&
- v->arch.hvm_vcpu.hvm_io.msix_snoop_address ==
+ !v->arch.hvm.hvm_io.msix_snoop_gpa &&
+ v->arch.hvm.hvm_io.msix_snoop_address ==
(gtable + msi_desc->msi_attrib.entry_nr *
PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET) )
- v->arch.hvm_vcpu.hvm_io.msix_unmask_address =
- v->arch.hvm_vcpu.hvm_io.msix_snoop_address;
+ v->arch.hvm.hvm_io.msix_unmask_address =
+ v->arch.hvm.hvm_io.msix_snoop_address;
}
}
@@ -592,13 +592,13 @@ void msixtbl_pt_cleanup(struct domain *d)
void msix_write_completion(struct vcpu *v)
{
- unsigned long ctrl_address = v->arch.hvm_vcpu.hvm_io.msix_unmask_address;
- unsigned long snoop_addr = v->arch.hvm_vcpu.hvm_io.msix_snoop_address;
+ unsigned long ctrl_address = v->arch.hvm.hvm_io.msix_unmask_address;
+ unsigned long snoop_addr = v->arch.hvm.hvm_io.msix_snoop_address;
- v->arch.hvm_vcpu.hvm_io.msix_snoop_address = 0;
+ v->arch.hvm.hvm_io.msix_snoop_address = 0;
if ( !ctrl_address && snoop_addr &&
- v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa )
+ v->arch.hvm.hvm_io.msix_snoop_gpa )
{
const struct msi_desc *desc;
uint32_t data;
@@ -610,7 +610,7 @@ void msix_write_completion(struct vcpu *v)
if ( desc &&
hvm_copy_from_guest_phys(&data,
- v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa,
+ v->arch.hvm.hvm_io.msix_snoop_gpa,
sizeof(data)) == HVMTRANS_okay &&
!(data & PCI_MSIX_VECTOR_BITMASK) )
ctrl_address = snoop_addr;
@@ -619,7 +619,7 @@ void msix_write_completion(struct vcpu *v)
if ( !ctrl_address )
return;
- v->arch.hvm_vcpu.hvm_io.msix_unmask_address = 0;
+ v->arch.hvm.hvm_io.msix_unmask_address = 0;
if ( msixtbl_write(v, ctrl_address, 4, 0) != X86EMUL_OKAY )
gdprintk(XENLOG_WARNING, "MSI-X write completion failure\n");
}
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index eb9b288..889067c 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -232,7 +232,7 @@ void vmx_intr_assist(void)
int pt_vector;
/* Block event injection when single step with MTF. */
- if ( unlikely(v->arch.hvm_vcpu.single_step) )
+ if ( unlikely(v->arch.hvm.single_step) )
{
v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
vmx_update_cpu_exec_control(v);
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index b20d8c4..032a681 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -96,7 +96,7 @@ static void realmode_deliver_exception(
void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
struct vcpu *curr = current;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
perfc_incr(realmode_emulations);
@@ -115,7 +115,7 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
if ( rc == X86EMUL_UNRECOGNIZED )
{
gdprintk(XENLOG_ERR, "Unrecognized insn.\n");
- if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ if ( curr->arch.hvm.guest_cr[0] & X86_CR0_PE )
goto fail;
realmode_deliver_exception(TRAP_invalid_op, 0, hvmemul_ctxt);
@@ -129,7 +129,7 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
domain_pause_for_debugger();
}
- else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ else if ( curr->arch.hvm.guest_cr[0] & X86_CR0_PE )
{
gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
hvmemul_ctxt->ctxt.event.vector);
@@ -156,7 +156,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
struct vcpu *curr = current;
struct hvm_emulate_ctxt hvmemul_ctxt;
struct segment_register *sreg;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
unsigned long intr_info;
unsigned int emulations = 0;
@@ -168,7 +168,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
hvm_emulate_init_once(&hvmemul_ctxt, NULL, regs);
/* Only deliver interrupts into emulated real mode. */
- if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
+ if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PE) &&
(intr_info & INTR_INFO_VALID_MASK) )
{
realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index f30850c..5e4a6b1 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1233,10 +1233,10 @@ static int construct_vmcs(struct vcpu *v)
| (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device));
vmx_update_exception_bitmap(v);
- v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
+ v->arch.hvm.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
hvm_update_guest_cr(v, 0);
- v->arch.hvm_vcpu.guest_cr[4] = 0;
+ v->arch.hvm.guest_cr[4] = 0;
hvm_update_guest_cr(v, 4);
if ( cpu_has_vmx_tpr_shadow )
@@ -1838,9 +1838,9 @@ void vmx_do_resume(struct vcpu *v)
|| v->domain->arch.monitor.software_breakpoint_enabled
|| v->domain->arch.monitor.singlestep_enabled;
- if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
+ if ( unlikely(v->arch.hvm.debug_state_latch != debug_state) )
{
- v->arch.hvm_vcpu.debug_state_latch = debug_state;
+ v->arch.hvm.debug_state_latch = debug_state;
vmx_update_debug_state(v);
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index ccfbacb..4abd327 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -581,7 +581,7 @@ int vmx_guest_x86_mode(struct vcpu *v)
{
unsigned long cs_ar_bytes;
- if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
+ if ( unlikely(!(v->arch.hvm.guest_cr[0] & X86_CR0_PE)) )
return 0;
if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
@@ -594,11 +594,11 @@ int vmx_guest_x86_mode(struct vcpu *v)
static void vmx_save_dr(struct vcpu *v)
{
- if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+ if ( !v->arch.hvm.flag_dr_dirty )
return;
/* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
- v->arch.hvm_vcpu.flag_dr_dirty = 0;
+ v->arch.hvm.flag_dr_dirty = 0;
v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
vmx_update_cpu_exec_control(v);
@@ -613,10 +613,10 @@ static void vmx_save_dr(struct vcpu *v)
static void __restore_debug_registers(struct vcpu *v)
{
- if ( v->arch.hvm_vcpu.flag_dr_dirty )
+ if ( v->arch.hvm.flag_dr_dirty )
return;
- v->arch.hvm_vcpu.flag_dr_dirty = 1;
+ v->arch.hvm.flag_dr_dirty = 1;
write_debugreg(0, v->arch.debugreg[0]);
write_debugreg(1, v->arch.debugreg[1]);
@@ -645,12 +645,12 @@ static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
vmx_vmcs_enter(v);
- c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
- c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
- c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
- c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
+ c->cr0 = v->arch.hvm.guest_cr[0];
+ c->cr2 = v->arch.hvm.guest_cr[2];
+ c->cr3 = v->arch.hvm.guest_cr[3];
+ c->cr4 = v->arch.hvm.guest_cr[4];
- c->msr_efer = v->arch.hvm_vcpu.guest_efer;
+ c->msr_efer = v->arch.hvm.guest_efer;
__vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
__vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
@@ -696,8 +696,8 @@ static int vmx_restore_cr0_cr3(
page ? pagetable_from_page(page) : pagetable_null();
}
- v->arch.hvm_vcpu.guest_cr[0] = cr0 | X86_CR0_ET;
- v->arch.hvm_vcpu.guest_cr[3] = cr3;
+ v->arch.hvm.guest_cr[0] = cr0 | X86_CR0_ET;
+ v->arch.hvm.guest_cr[3] = cr3;
return 0;
}
@@ -731,13 +731,13 @@ static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
vmx_vmcs_enter(v);
- v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
- v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+ v->arch.hvm.guest_cr[2] = c->cr2;
+ v->arch.hvm.guest_cr[4] = c->cr4;
vmx_update_guest_cr(v, 0, 0);
vmx_update_guest_cr(v, 2, 0);
vmx_update_guest_cr(v, 4, 0);
- v->arch.hvm_vcpu.guest_efer = c->msr_efer;
+ v->arch.hvm.guest_efer = c->msr_efer;
vmx_update_guest_efer(v);
__vmwrite(GUEST_SYSENTER_CS, c->sysenter_cs);
@@ -827,7 +827,7 @@ static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
{
- ctxt->msr[ctxt->count].val = v->arch.hvm_vcpu.msr_xss;
+ ctxt->msr[ctxt->count].val = v->arch.hvm.msr_xss;
if ( ctxt->msr[ctxt->count].val )
ctxt->msr[ctxt->count++].index = MSR_IA32_XSS;
}
@@ -854,7 +854,7 @@ static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
break;
case MSR_IA32_XSS:
if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
- v->arch.hvm_vcpu.msr_xss = ctxt->msr[i].val;
+ v->arch.hvm.msr_xss = ctxt->msr[i].val;
else
err = -ENXIO;
break;
@@ -897,10 +897,10 @@ static void vmx_fpu_leave(struct vcpu *v)
* then this is not necessary: no FPU activity can occur until the guest
* clears CR0.TS, and we will initialise the FPU when that happens.
*/
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
{
- v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
- __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+ v->arch.hvm.hw_cr[0] |= X86_CR0_TS;
+ __vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]);
v->arch.hvm_vmx.exception_bitmap |= (1u << TRAP_no_device);
vmx_update_exception_bitmap(v);
}
@@ -1192,7 +1192,7 @@ static unsigned long vmx_get_shadow_gs_base(struct vcpu *v)
static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
{
if ( !paging_mode_hap(v->domain) ||
- unlikely(v->arch.hvm_vcpu.cache_mode == NO_FILL_CACHE_MODE) )
+ unlikely(v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
return 0;
vmx_vmcs_enter(v);
@@ -1204,7 +1204,7 @@ static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
static int vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
{
if ( !paging_mode_hap(v->domain) ||
- unlikely(v->arch.hvm_vcpu.cache_mode == NO_FILL_CACHE_MODE) )
+ unlikely(v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
return 0;
vmx_vmcs_enter(v);
@@ -1248,7 +1248,7 @@ static void vmx_handle_cd(struct vcpu *v, unsigned long value)
}
else
{
- u64 *pat = &v->arch.hvm_vcpu.pat_cr;
+ u64 *pat = &v->arch.hvm.pat_cr;
if ( value & X86_CR0_CD )
{
@@ -1272,11 +1272,11 @@ static void vmx_handle_cd(struct vcpu *v, unsigned long value)
wbinvd(); /* flush possibly polluted cache */
hvm_asid_flush_vcpu(v); /* invalidate memory type cached in TLB */
- v->arch.hvm_vcpu.cache_mode = NO_FILL_CACHE_MODE;
+ v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
}
else
{
- v->arch.hvm_vcpu.cache_mode = NORMAL_CACHE_MODE;
+ v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
vmx_set_guest_pat(v, *pat);
if ( !iommu_enabled || iommu_snoop )
vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
@@ -1369,14 +1369,14 @@ static void vmx_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
static void vmx_load_pdptrs(struct vcpu *v)
{
- unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
+ unsigned long cr3 = v->arch.hvm.guest_cr[3];
uint64_t *guest_pdptes;
struct page_info *page;
p2m_type_t p2mt;
char *p;
/* EPT needs to load PDPTRS into VMCS for PAE. */
- if ( !hvm_pae_enabled(v) || (v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
+ if ( !hvm_pae_enabled(v) || (v->arch.hvm.guest_efer & EFER_LMA) )
return;
if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
@@ -1430,7 +1430,7 @@ static void vmx_update_host_cr3(struct vcpu *v)
void vmx_update_debug_state(struct vcpu *v)
{
- if ( v->arch.hvm_vcpu.debug_state_latch )
+ if ( v->arch.hvm.debug_state_latch )
v->arch.hvm_vmx.exception_bitmap |= 1U << TRAP_int3;
else
v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_int3);
@@ -1479,22 +1479,22 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
}
if ( !nestedhvm_vcpu_in_guestmode(v) )
- __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm.guest_cr[0]);
else
nvmx_set_cr_read_shadow(v, 0);
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
{
if ( v != current )
{
if ( !v->arch.fully_eager_fpu )
hw_cr0_mask |= X86_CR0_TS;
}
- else if ( v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS )
+ else if ( v->arch.hvm.hw_cr[0] & X86_CR0_TS )
vmx_fpu_enter(v);
}
- realmode = !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE);
+ realmode = !(v->arch.hvm.guest_cr[0] & X86_CR0_PE);
if ( !vmx_unrestricted_guest(v) &&
(realmode != v->arch.hvm_vmx.vmx_realmode) )
@@ -1527,24 +1527,24 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
vmx_update_exception_bitmap(v);
}
- v->arch.hvm_vcpu.hw_cr[0] =
- v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
- __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+ v->arch.hvm.hw_cr[0] =
+ v->arch.hvm.guest_cr[0] | hw_cr0_mask;
+ __vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]);
}
/* Fallthrough: Changing CR0 can change some bits in real CR4. */
case 4:
- v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
+ v->arch.hvm.hw_cr[4] = HVM_CR4_HOST_MASK;
if ( paging_mode_hap(v->domain) )
- v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
+ v->arch.hvm.hw_cr[4] &= ~X86_CR4_PAE;
if ( !nestedhvm_vcpu_in_guestmode(v) )
- __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm.guest_cr[4]);
else
nvmx_set_cr_read_shadow(v, 4);
- v->arch.hvm_vcpu.hw_cr[4] |= v->arch.hvm_vcpu.guest_cr[4];
+ v->arch.hvm.hw_cr[4] |= v->arch.hvm.guest_cr[4];
if ( v->arch.hvm_vmx.vmx_realmode )
- v->arch.hvm_vcpu.hw_cr[4] |= X86_CR4_VME;
+ v->arch.hvm.hw_cr[4] |= X86_CR4_VME;
if ( !hvm_paging_enabled(v) )
{
@@ -1564,8 +1564,8 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
* HVM_PARAM_IDENT_PT which is a 32bit pagetable using 4M
* superpages. Override the guests paging settings to match.
*/
- v->arch.hvm_vcpu.hw_cr[4] |= X86_CR4_PSE;
- v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
+ v->arch.hvm.hw_cr[4] |= X86_CR4_PSE;
+ v->arch.hvm.hw_cr[4] &= ~X86_CR4_PAE;
}
/*
@@ -1576,10 +1576,10 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
* effect if paging was actually disabled, so hide them behind the
* back of the guest.
*/
- v->arch.hvm_vcpu.hw_cr[4] &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
+ v->arch.hvm.hw_cr[4] &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
}
- __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
+ __vmwrite(GUEST_CR4, v->arch.hvm.hw_cr[4]);
/*
* Shadow path has not been optimized because it requires
@@ -1625,12 +1625,12 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
if ( paging_mode_hap(v->domain) )
{
if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
- v->arch.hvm_vcpu.hw_cr[3] =
+ v->arch.hvm.hw_cr[3] =
v->domain->arch.hvm.params[HVM_PARAM_IDENT_PT];
vmx_load_pdptrs(v);
}
- __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
+ __vmwrite(GUEST_CR3, v->arch.hvm.hw_cr[3]);
if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
hvm_asid_flush_vcpu(v);
@@ -1645,7 +1645,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
static void vmx_update_guest_efer(struct vcpu *v)
{
- unsigned long entry_ctls, guest_efer = v->arch.hvm_vcpu.guest_efer,
+ unsigned long entry_ctls, guest_efer = v->arch.hvm.guest_efer,
xen_efer = read_efer();
if ( paging_mode_shadow(v->domain) )
@@ -1714,7 +1714,7 @@ static void vmx_update_guest_efer(struct vcpu *v)
* If the guests virtualised view of MSR_EFER matches the value loaded
* into hardware, clear the read intercept to avoid unnecessary VMExits.
*/
- if ( guest_efer == v->arch.hvm_vcpu.guest_efer )
+ if ( guest_efer == v->arch.hvm.guest_efer )
vmx_clear_msr_intercept(v, MSR_EFER, VMX_MSR_R);
else
vmx_set_msr_intercept(v, MSR_EFER, VMX_MSR_R);
@@ -1863,7 +1863,7 @@ static void vmx_inject_event(const struct x86_event *event)
case TRAP_page_fault:
ASSERT(_event.type == X86_EVENTTYPE_HW_EXCEPTION);
- curr->arch.hvm_vcpu.guest_cr[2] = _event.cr2;
+ curr->arch.hvm.guest_cr[2] = _event.cr2;
break;
}
@@ -1901,7 +1901,7 @@ static void vmx_inject_event(const struct x86_event *event)
if ( (_event.vector == TRAP_page_fault) &&
(_event.type == X86_EVENTTYPE_HW_EXCEPTION) )
HVMTRACE_LONG_2D(PF_INJECT, _event.error_code,
- TRC_PAR_LONG(curr->arch.hvm_vcpu.guest_cr[2]));
+ TRC_PAR_LONG(curr->arch.hvm.guest_cr[2]));
else
HVMTRACE_2D(INJ_EXC, _event.vector, _event.error_code);
}
@@ -2549,10 +2549,10 @@ static void vmx_fpu_dirty_intercept(void)
vmx_fpu_enter(curr);
/* Disable TS in guest CR0 unless the guest wants the exception too. */
- if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_TS) )
{
- curr->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
- __vmwrite(GUEST_CR0, curr->arch.hvm_vcpu.hw_cr[0]);
+ curr->arch.hvm.hw_cr[0] &= ~X86_CR0_TS;
+ __vmwrite(GUEST_CR0, curr->arch.hvm.hw_cr[0]);
}
}
@@ -2586,7 +2586,7 @@ static void vmx_dr_access(unsigned long exit_qualification,
HVMTRACE_0D(DR_WRITE);
- if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+ if ( !v->arch.hvm.flag_dr_dirty )
__restore_debug_registers(v);
/* Allow guest direct access to DR registers */
@@ -2633,7 +2633,7 @@ static int vmx_cr_access(cr_access_qual_t qual)
case VMX_CR_ACCESS_TYPE_CLTS:
{
- unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
+ unsigned long old = curr->arch.hvm.guest_cr[0];
unsigned long value = old & ~X86_CR0_TS;
/*
@@ -2642,7 +2642,7 @@ static int vmx_cr_access(cr_access_qual_t qual)
* return value is ignored for now.
*/
hvm_monitor_crX(CR0, value, old);
- curr->arch.hvm_vcpu.guest_cr[0] = value;
+ curr->arch.hvm.guest_cr[0] = value;
vmx_update_guest_cr(curr, 0, 0);
HVMTRACE_0D(CLTS);
break;
@@ -2650,7 +2650,7 @@ static int vmx_cr_access(cr_access_qual_t qual)
case VMX_CR_ACCESS_TYPE_LMSW:
{
- unsigned long value = curr->arch.hvm_vcpu.guest_cr[0];
+ unsigned long value = curr->arch.hvm.guest_cr[0];
int rc;
/* LMSW can (1) set PE; (2) set or clear MP, EM, and TS. */
@@ -3617,14 +3617,14 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
* Xen allows the guest to modify some CR4 bits directly, update cached
* values to match.
*/
- __vmread(GUEST_CR4, &v->arch.hvm_vcpu.hw_cr[4]);
- v->arch.hvm_vcpu.guest_cr[4] &= v->arch.hvm_vmx.cr4_host_mask;
- v->arch.hvm_vcpu.guest_cr[4] |= v->arch.hvm_vcpu.hw_cr[4] &
- ~v->arch.hvm_vmx.cr4_host_mask;
+ __vmread(GUEST_CR4, &v->arch.hvm.hw_cr[4]);
+ v->arch.hvm.guest_cr[4] &= v->arch.hvm_vmx.cr4_host_mask;
+ v->arch.hvm.guest_cr[4] |= (v->arch.hvm.hw_cr[4] &
+ ~v->arch.hvm_vmx.cr4_host_mask);
- __vmread(GUEST_CR3, &v->arch.hvm_vcpu.hw_cr[3]);
+ __vmread(GUEST_CR3, &v->arch.hvm.hw_cr[3]);
if ( vmx_unrestricted_guest(v) || hvm_paging_enabled(v) )
- v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3];
+ v->arch.hvm.guest_cr[3] = v->arch.hvm.hw_cr[3];
}
__vmread(VM_EXIT_REASON, &exit_reason);
@@ -4167,7 +4167,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
case EXIT_REASON_MONITOR_TRAP_FLAG:
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
vmx_update_cpu_exec_control(v);
- if ( v->arch.hvm_vcpu.single_step )
+ if ( v->arch.hvm.single_step )
{
hvm_monitor_debug(regs->rip,
HVM_MONITOR_SINGLESTEP_BREAKPOINT,
@@ -4338,7 +4338,7 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
if ( nestedhvm_vcpu_in_guestmode(curr) )
p_asid = &vcpu_nestedhvm(curr).nv_n2asid;
else
- p_asid = &curr->arch.hvm_vcpu.n1asid;
+ p_asid = &curr->arch.hvm.n1asid;
old_asid = p_asid->asid;
need_flush = hvm_asid_handle_vmenter(p_asid);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index b7d9a1a..5cdea47 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -383,8 +383,8 @@ static int vmx_inst_check_privilege(struct cpu_user_regs *regs, int vmxop_check)
if ( vmxop_check )
{
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
- !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VMXE) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_PE) ||
+ !(v->arch.hvm.guest_cr[4] & X86_CR4_VMXE) )
goto invalid_op;
}
else if ( !nvmx_vcpu_in_vmx(v) )
@@ -1082,7 +1082,7 @@ static void load_shadow_guest_state(struct vcpu *v)
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, 0);
vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmentry_fields), vmentry_fields);
@@ -1170,7 +1170,7 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
* hvm_set_efer won't work if CR0.PG = 1, so we change the value
* directly to make hvm_long_mode_active(v) work in L2.
* An additional update_paging_modes is also needed if
- * there is 32/64 switch. v->arch.hvm_vcpu.guest_efer doesn't
+ * there is 32/64 switch. v->arch.hvm.guest_efer doesn't
* need to be saved, since its value on vmexit is determined by
* L1 exit_controls
*/
@@ -1178,9 +1178,9 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
lm_l2 = !!(get_vvmcs(v, VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
if ( lm_l2 )
- v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME;
+ v->arch.hvm.guest_efer |= EFER_LMA | EFER_LME;
else
- v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME);
+ v->arch.hvm.guest_efer &= ~(EFER_LMA | EFER_LME);
load_shadow_control(v);
load_shadow_guest_state(v);
@@ -1189,7 +1189,7 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
paging_update_paging_modes(v);
if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
- !(v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
+ !(v->arch.hvm.guest_efer & EFER_LMA) )
vvmcs_to_shadow_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
regs->rip = get_vvmcs(v, GUEST_RIP);
@@ -1236,7 +1236,7 @@ static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
if ( v->arch.hvm_vmx.cr4_host_mask != ~0UL )
/* Only need to update nested GUEST_CR4 if not all bits are trapped. */
- set_vvmcs(v, GUEST_CR4, v->arch.hvm_vcpu.guest_cr[4]);
+ set_vvmcs(v, GUEST_CR4, v->arch.hvm.guest_cr[4]);
}
static void sync_vvmcs_ro(struct vcpu *v)
@@ -1288,7 +1288,7 @@ static void load_vvmcs_host_state(struct vcpu *v)
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, 0);
set_vvmcs(v, VM_ENTRY_INTR_INFO, 0);
}
@@ -1369,7 +1369,7 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
sync_exception_state(v);
if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
- !(v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
+ !(v->arch.hvm.guest_efer & EFER_LMA) )
shadow_to_vvmcs_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
/* This will clear current pCPU bit in p2m->dirty_cpumask */
@@ -1385,9 +1385,9 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
lm_l1 = !!(get_vvmcs(v, VM_EXIT_CONTROLS) & VM_EXIT_IA32E_MODE);
if ( lm_l1 )
- v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME;
+ v->arch.hvm.guest_efer |= EFER_LMA | EFER_LME;
else
- v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME);
+ v->arch.hvm.guest_efer &= ~(EFER_LMA | EFER_LME);
vmx_update_cpu_exec_control(v);
vmx_update_secondary_exec_control(v);
@@ -2438,7 +2438,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
case EXIT_REASON_DR_ACCESS:
ctrl = __n2_exec_control(v);
if ( (ctrl & CPU_BASED_MOV_DR_EXITING) &&
- v->arch.hvm_vcpu.flag_dr_dirty )
+ v->arch.hvm.flag_dr_dirty )
nvcpu->nv_vmexit_pending = 1;
break;
case EXIT_REASON_INVLPG:
@@ -2620,13 +2620,13 @@ void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr)
* hardware. It consists of the L2-owned bits from the new
* value combined with the L1-owned bits from L1's guest cr.
*/
- v->arch.hvm_vcpu.guest_cr[cr] &= ~virtual_cr_mask;
- v->arch.hvm_vcpu.guest_cr[cr] |= virtual_cr_mask &
+ v->arch.hvm.guest_cr[cr] &= ~virtual_cr_mask;
+ v->arch.hvm.guest_cr[cr] |= virtual_cr_mask &
get_vvmcs(v, cr_field);
}
/* nvcpu.guest_cr is what L2 write to cr actually. */
- __vmwrite(read_shadow_field, v->arch.hvm_vcpu.nvcpu.guest_cr[cr]);
+ __vmwrite(read_shadow_field, v->arch.hvm.nvcpu.guest_cr[cr]);
}
/*
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index 7b57017..ecd25d7 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -55,7 +55,7 @@ uint64_t hvm_get_guest_time_fixed(const struct vcpu *v, uint64_t at_tsc)
}
spin_unlock(&pl->pl_time_lock);
- return now + v->arch.hvm_vcpu.stime_offset;
+ return now + v->arch.hvm.stime_offset;
}
void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
@@ -64,9 +64,9 @@ void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
if ( offset )
{
- v->arch.hvm_vcpu.stime_offset += offset;
+ v->arch.hvm.stime_offset += offset;
/*
- * If hvm_vcpu.stime_offset is updated make sure to
+ * If hvm.stime_offset is updated make sure to
* also update vcpu time, since this value is used to
* calculate the TSC.
*/
@@ -159,16 +159,16 @@ static void pt_lock(struct periodic_time *pt)
for ( ; ; )
{
v = pt->vcpu;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
if ( likely(pt->vcpu == v) )
break;
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
}
static void pt_unlock(struct periodic_time *pt)
{
- spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&pt->vcpu->arch.hvm.tm_lock);
}
static void pt_process_missed_ticks(struct periodic_time *pt)
@@ -195,7 +195,7 @@ static void pt_freeze_time(struct vcpu *v)
if ( !mode_is(v->domain, delay_for_missed_ticks) )
return;
- v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
+ v->arch.hvm.guest_time = hvm_get_guest_time(v);
}
static void pt_thaw_time(struct vcpu *v)
@@ -203,22 +203,22 @@ static void pt_thaw_time(struct vcpu *v)
if ( !mode_is(v->domain, delay_for_missed_ticks) )
return;
- if ( v->arch.hvm_vcpu.guest_time == 0 )
+ if ( v->arch.hvm.guest_time == 0 )
return;
- hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
- v->arch.hvm_vcpu.guest_time = 0;
+ hvm_set_guest_time(v, v->arch.hvm.guest_time);
+ v->arch.hvm.guest_time = 0;
}
void pt_save_timer(struct vcpu *v)
{
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+ struct list_head *head = &v->arch.hvm.tm_list;
struct periodic_time *pt;
if ( v->pause_flags & VPF_blocked )
return;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
list_for_each_entry ( pt, head, list )
if ( !pt->do_not_freeze )
@@ -226,15 +226,15 @@ void pt_save_timer(struct vcpu *v)
pt_freeze_time(v);
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
void pt_restore_timer(struct vcpu *v)
{
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+ struct list_head *head = &v->arch.hvm.tm_list;
struct periodic_time *pt;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
list_for_each_entry ( pt, head, list )
{
@@ -247,7 +247,7 @@ void pt_restore_timer(struct vcpu *v)
pt_thaw_time(v);
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
static void pt_timer_fn(void *data)
@@ -302,13 +302,13 @@ static void pt_irq_fired(struct vcpu *v, struct periodic_time *pt)
int pt_update_irq(struct vcpu *v)
{
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+ struct list_head *head = &v->arch.hvm.tm_list;
struct periodic_time *pt, *temp, *earliest_pt;
uint64_t max_lag;
int irq, pt_vector = -1;
bool level;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
earliest_pt = NULL;
max_lag = -1ULL;
@@ -338,7 +338,7 @@ int pt_update_irq(struct vcpu *v)
if ( earliest_pt == NULL )
{
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
return -1;
}
@@ -346,7 +346,7 @@ int pt_update_irq(struct vcpu *v)
irq = earliest_pt->irq;
level = earliest_pt->level;
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
switch ( earliest_pt->source )
{
@@ -393,9 +393,9 @@ int pt_update_irq(struct vcpu *v)
time_cb *cb = NULL;
void *cb_priv;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
/* Make sure the timer is still on the list. */
- list_for_each_entry ( pt, &v->arch.hvm_vcpu.tm_list, list )
+ list_for_each_entry ( pt, &v->arch.hvm.tm_list, list )
if ( pt == earliest_pt )
{
pt_irq_fired(v, pt);
@@ -403,7 +403,7 @@ int pt_update_irq(struct vcpu *v)
cb_priv = pt->priv;
break;
}
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
if ( cb != NULL )
cb(v, cb_priv);
@@ -418,7 +418,7 @@ int pt_update_irq(struct vcpu *v)
static struct periodic_time *is_pt_irq(
struct vcpu *v, struct hvm_intack intack)
{
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+ struct list_head *head = &v->arch.hvm.tm_list;
struct periodic_time *pt;
list_for_each_entry ( pt, head, list )
@@ -440,12 +440,12 @@ void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
if ( intack.source == hvm_intsrc_vector )
return;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
pt = is_pt_irq(v, intack);
if ( pt == NULL )
{
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
return;
}
@@ -454,7 +454,7 @@ void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
cb = pt->cb;
cb_priv = pt->priv;
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
if ( cb != NULL )
cb(v, cb_priv);
@@ -462,15 +462,15 @@ void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
void pt_migrate(struct vcpu *v)
{
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+ struct list_head *head = &v->arch.hvm.tm_list;
struct periodic_time *pt;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
list_for_each_entry ( pt, head, list )
migrate_timer(&pt->timer, v->processor);
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
void create_periodic_time(
@@ -489,7 +489,7 @@ void create_periodic_time(
destroy_periodic_time(pt);
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
pt->pending_intr_nr = 0;
pt->do_not_freeze = 0;
@@ -534,12 +534,12 @@ void create_periodic_time(
pt->priv = data;
pt->on_list = 1;
- list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
+ list_add(&pt->list, &v->arch.hvm.tm_list);
init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
set_timer(&pt->timer, pt->scheduled);
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
void destroy_periodic_time(struct periodic_time *pt)
@@ -578,16 +578,16 @@ static void pt_adjust_vcpu(struct periodic_time *pt, struct vcpu *v)
pt->on_list = 0;
pt_unlock(pt);
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
pt->vcpu = v;
if ( on_list )
{
pt->on_list = 1;
- list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
+ list_add(&pt->list, &v->arch.hvm.tm_list);
migrate_timer(&pt->timer, v->processor);
}
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
void pt_adjust_global_vcpu_target(struct vcpu *v)
@@ -627,7 +627,7 @@ static void pt_resume(struct periodic_time *pt)
if ( pt->pending_intr_nr && !pt->on_list )
{
pt->on_list = 1;
- list_add(&pt->list, &pt->vcpu->arch.hvm_vcpu.tm_list);
+ list_add(&pt->list, &pt->vcpu->arch.hvm.tm_list);
vcpu_kick(pt->vcpu);
}
pt_unlock(pt);
diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
index cb3f9ce..3b8ee2e 100644
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -42,7 +42,7 @@ asm(".file \"" __OBJECT_FILE__ "\"");
unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec)
{
- unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
+ unsigned long cr3 = v->arch.hvm.guest_cr[3];
return hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(v, p2m, cr3, gva, pfec,
NULL);
}
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index fe10e9d..6031361 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -664,7 +664,7 @@ static bool_t hap_invlpg(struct vcpu *v, unsigned long va)
static void hap_update_cr3(struct vcpu *v, int do_locking, bool noflush)
{
- v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
+ v->arch.hvm.hw_cr[3] = v->arch.hvm.guest_cr[3];
hvm_update_guest_cr3(v, noflush);
}
@@ -680,7 +680,7 @@ hap_paging_get_mode(struct vcpu *v)
{
struct domain *d = v->domain;
- unsigned long cr3_gfn = v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT;
+ unsigned long cr3_gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
p2m_type_t t;
/* We hold onto the cr3 as it may be modified later, and
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 62819eb..2dac8d1 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4070,7 +4070,7 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
ASSERT(shadow_mode_external(d));
/* Find where in the page the l3 table is */
- guest_idx = guest_index((void *)v->arch.hvm_vcpu.guest_cr[3]);
+ guest_idx = guest_index((void *)v->arch.hvm.guest_cr[3]);
// Ignore the low 2 bits of guest_idx -- they are really just
// cache control.
@@ -4208,19 +4208,17 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
///
- /// v->arch.hvm_vcpu.hw_cr[3]
+ /// v->arch.hvm.hw_cr[3]
///
if ( shadow_mode_external(d) )
{
ASSERT(is_hvm_domain(d));
#if SHADOW_PAGING_LEVELS == 3
/* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
- v->arch.hvm_vcpu.hw_cr[3] =
- virt_to_maddr(&v->arch.paging.shadow.l3table);
+ v->arch.hvm.hw_cr[3] = virt_to_maddr(&v->arch.paging.shadow.l3table);
#else
/* 4-on-4: Just use the shadow top-level directly */
- v->arch.hvm_vcpu.hw_cr[3] =
- pagetable_get_paddr(v->arch.shadow_table[0]);
+ v->arch.hvm.hw_cr[3] = pagetable_get_paddr(v->arch.shadow_table[0]);
#endif
hvm_update_guest_cr3(v, noflush);
}
@@ -4543,7 +4541,7 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
unsigned long l3gfn;
mfn_t l3mfn;
- gcr3 = (v->arch.hvm_vcpu.guest_cr[3]);
+ gcr3 = v->arch.hvm.guest_cr[3];
/* fast path: the pagetable belongs to the current context */
if ( gcr3 == gpa )
fast_path = 1;
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 5922fbf..e964e60 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1041,7 +1041,7 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
{
struct pl_time *pl = v->domain->arch.hvm.pl_time;
- stime += pl->stime_offset + v->arch.hvm_vcpu.stime_offset;
+ stime += pl->stime_offset + v->arch.hvm.stime_offset;
if ( stime >= 0 )
tsc_stamp = gtime_to_gtsc(d, stime);
else
@@ -1081,7 +1081,7 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
_u.flags |= XEN_PVCLOCK_TSC_STABLE_BIT;
if ( is_hvm_domain(d) )
- _u.tsc_timestamp += v->arch.hvm_vcpu.cache_tsc_offset;
+ _u.tsc_timestamp += v->arch.hvm.cache_tsc_offset;
/* Don't bother unless timestamp record has changed or we are forced. */
_u.version = u->version; /* make versions match for memcmp test */
@@ -2199,7 +2199,7 @@ void tsc_set_info(struct domain *d,
*/
d->arch.hvm.sync_tsc = rdtsc();
hvm_set_tsc_offset(d->vcpu[0],
- d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
+ d->vcpu[0]->arch.hvm.cache_tsc_offset,
d->arch.hvm.sync_tsc);
}
}
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 26524c4..555804c 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -95,12 +95,12 @@ void __dummy__(void)
OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm_vmx.vmx_realmode);
OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm_vmx.vmx_emulate);
OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm_vmx.vm86_segment_mask);
- OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
+ OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm.guest_cr[2]);
BLANK();
- OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm_vcpu.nvcpu.nv_guestmode);
- OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm_vcpu.nvcpu.nv_p2m);
- OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm_vcpu.nvcpu.u.nsvm.ns_hap_enabled);
+ OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm.nvcpu.nv_guestmode);
+ OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm.nvcpu.nv_p2m);
+ OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm.nvcpu.u.nsvm.ns_hap_enabled);
BLANK();
OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 606b1b0..c423bc0 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -104,10 +104,10 @@ void show_registers(const struct cpu_user_regs *regs)
{
struct segment_register sreg;
context = CTXT_hvm_guest;
- fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
- fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
- fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
- fault_crs[4] = v->arch.hvm_vcpu.guest_cr[4];
+ fault_crs[0] = v->arch.hvm.guest_cr[0];
+ fault_crs[2] = v->arch.hvm.guest_cr[2];
+ fault_crs[3] = v->arch.hvm.guest_cr[3];
+ fault_crs[4] = v->arch.hvm.guest_cr[4];
hvm_get_segment_register(v, x86_seg_cs, &sreg);
fault_regs.cs = sreg.sel;
hvm_get_segment_register(v, x86_seg_ds, &sreg);
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 4722c2d..0ea3742 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -541,7 +541,7 @@ struct arch_vcpu
/* Virtual Machine Extensions */
union {
struct pv_vcpu pv;
- struct hvm_vcpu hvm_vcpu;
+ struct hvm_vcpu hvm;
};
pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */
@@ -605,8 +605,8 @@ void update_guest_memory_policy(struct vcpu *v,
struct guest_memory_policy *policy);
/* Shorthands to improve code legibility. */
-#define hvm_vmx hvm_vcpu.u.vmx
-#define hvm_svm hvm_vcpu.u.svm
+#define hvm_vmx hvm.u.vmx
+#define hvm_svm hvm.u.svm
bool update_runstate_area(struct vcpu *);
bool update_secondary_system_time(struct vcpu *,
diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
index 08031c8..8684b83 100644
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -215,7 +215,7 @@ static inline bool guest_can_use_l2_superpages(const struct vcpu *v)
return (is_pv_vcpu(v) ||
GUEST_PAGING_LEVELS != 2 ||
!hvm_paging_enabled(v) ||
- (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE));
+ (v->arch.hvm.guest_cr[4] & X86_CR4_PSE));
}
static inline bool guest_can_use_l3_superpages(const struct domain *d)
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index ac0f035..132e62b 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -285,27 +285,27 @@ void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *);
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
#define hvm_paging_enabled(v) \
- (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
+ (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_PG))
#define hvm_wp_enabled(v) \
- (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_WP))
+ (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_WP))
#define hvm_pcid_enabled(v) \
- (!!((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PCIDE))
+ (!!((v)->arch.hvm.guest_cr[4] & X86_CR4_PCIDE))
#define hvm_pae_enabled(v) \
- (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
+ (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PAE))
#define hvm_smep_enabled(v) \
- (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
+ (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMEP))
#define hvm_smap_enabled(v) \
- (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
+ (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMAP))
#define hvm_nx_enabled(v) \
- ((v)->arch.hvm_vcpu.guest_efer & EFER_NX)
+ ((v)->arch.hvm.guest_efer & EFER_NX)
#define hvm_pku_enabled(v) \
- (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PKE))
+ (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKE))
/* Can we use superpages in the HAP p2m table? */
#define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
#define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB))
-#define hvm_long_mode_active(v) (!!((v)->arch.hvm_vcpu.guest_efer & EFER_LMA))
+#define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA))
enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);
@@ -548,7 +548,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
#define hvm_msr_tsc_aux(v) ({ \
struct domain *__d = (v)->domain; \
(__d->arch.tsc_mode == TSC_MODE_PVRDTSCP) \
- ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm_vcpu.msr_tsc_aux; \
+ ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm.msr_tsc_aux; \
})
int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t
*msr_content);
diff --git a/xen/include/asm-x86/hvm/nestedhvm.h b/xen/include/asm-x86/hvm/nestedhvm.h
index 4a041e2..9d1c274 100644
--- a/xen/include/asm-x86/hvm/nestedhvm.h
+++ b/xen/include/asm-x86/hvm/nestedhvm.h
@@ -89,7 +89,7 @@ static inline void nestedhvm_set_cr(struct vcpu *v, unsigned int cr,
{
if ( !nestedhvm_vmswitch_in_progress(v) &&
nestedhvm_vcpu_in_guestmode(v) )
- v->arch.hvm_vcpu.nvcpu.guest_cr[cr] = value;
+ v->arch.hvm.nvcpu.guest_cr[cr] = value;
}
#endif /* _HVM_NESTEDHVM_H */
diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h
index abcf2e7..31fb4bf 100644
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
@@ -94,7 +94,7 @@ struct nestedsvm {
/* True when l1 guest enabled SVM in EFER */
#define nsvm_efer_svm_enabled(v) \
- (!!((v)->arch.hvm_vcpu.guest_efer & EFER_SVME))
+ (!!((v)->arch.hvm.guest_efer & EFER_SVME))
int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr);
void nestedsvm_vmexit_defer(struct vcpu *v,
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 86b4ee2..54ea044 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -135,14 +135,14 @@ struct nestedvcpu {
unsigned long guest_cr[5];
};
-#define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
+#define vcpu_nestedhvm(v) ((v)->arch.hvm.nvcpu)
struct altp2mvcpu {
uint16_t p2midx; /* alternate p2m index */
gfn_t veinfo_gfn; /* #VE information page gfn */
};
-#define vcpu_altp2m(v) ((v)->arch.hvm_vcpu.avcpu)
+#define vcpu_altp2m(v) ((v)->arch.hvm.avcpu)
struct hvm_vcpu {
/* Guest control-register and EFER values, just as the guest sees them. */
diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
index 212c36b..8dbec90 100644
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -25,10 +25,10 @@
#include <public/hvm/ioreq.h>
#include <asm/hvm/vpt.h>
-#define vcpu_vlapic(x) (&(x)->arch.hvm_vcpu.vlapic)
-#define vlapic_vcpu(x) (container_of((x), struct vcpu, arch.hvm_vcpu.vlapic))
+#define vcpu_vlapic(x) (&(x)->arch.hvm.vlapic)
+#define vlapic_vcpu(x) (container_of((x), struct vcpu, arch.hvm.vlapic))
#define const_vlapic_vcpu(x) (container_of((x), const struct vcpu, \
- arch.hvm_vcpu.vlapic))
+ arch.hvm.vlapic))
#define vlapic_domain(x) (vlapic_vcpu(x)->domain)
#define _VLAPIC_ID(vlapic, id) (vlapic_x2apic_mode(vlapic) \
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 89619e4..23de869 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -552,7 +552,7 @@ static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva)
type = INVVPID_ALL_CONTEXT;
execute_invvpid:
- __invvpid(type, v->arch.hvm_vcpu.n1asid.asid, (u64)gva);
+ __invvpid(type, v->arch.hvm.n1asid.asid, (u64)gva);
}
static inline void vpid_sync_all(void)
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel