|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v7 27/32] xen/x86: allow HVM guests to use hypercalls to bring up vCPUs
Allow the usage of the VCPUOP_initialise, VCPUOP_up, VCPUOP_down and
VCPUOP_is_up hypercalls from HVM guests.
This patch introduces a new structure (vcpu_hvm_context) that should be used
in conjunction with the VCPUOP_initialise hypercall in order to initialize
vCPUs for HVM guests.
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxx>
---
Changes since v6:
- Add comments to clarify some initializations.
 - Introduce a generic default_initialize_vcpu that's used to initialize an
ARM vCPU or an x86 PV vCPU.
- Move the undef of the SEG macro.
- Fix the size of the eflags register, it should be 32bits.
- Add a comment regarding the value of the 12-15 bits of the _ar fields.
 - Remove the 16bit structure, the 32bit one can be used to start the cpu in
real mode.
- Add some sanity checks to the values passed in.
- Add paddings to vcpu_hvm_context so the layout on 32/64bits is the same.
- Add support for the compat version of VCPUOP_initialise.
Changes since v5:
- Fix a coding style issue.
- Merge the code from wip-dmlite-v5-refactor by Andrew in order to reduce
bloat.
- Print the offending %cr3 in case of error when using shadow.
- Reduce the scope of local variables in arch_initialize_vcpu.
- s/current->domain/v->domain/g in arch_initialize_vcpu.
- Expand the comment in public/vcpu.h to document the usage of
vcpu_hvm_context for HVM guests.
- Add myself as the copyright holder for the public hvm_vcpu.h header.
Changes since v4:
- Don't assume mode is 64B, add an explicit check.
- Don't set TF_kernel_mode, it is only needed for PV guests.
- Don't set CR0_ET unconditionally.
---
xen/arch/x86/domain.c | 185 ++++++++++++++++++++++++++++++++++++++
xen/arch/x86/hvm/hvm.c | 8 ++
xen/common/compat/domain.c | 71 +++++++++++----
xen/common/domain.c | 56 +++++++++---
xen/include/Makefile | 1 +
xen/include/asm-x86/domain.h | 3 +
xen/include/public/hvm/hvm_vcpu.h | 144 +++++++++++++++++++++++++++++
xen/include/public/vcpu.h | 6 +-
xen/include/xlat.lst | 3 +
9 files changed, 448 insertions(+), 29 deletions(-)
create mode 100644 xen/include/public/hvm/hvm_vcpu.h
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a3b1c9b..af5feea 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -37,6 +37,7 @@
#include <xen/wait.h>
#include <xen/guest_access.h>
#include <public/sysctl.h>
+#include <public/hvm/hvm_vcpu.h>
#include <asm/regs.h>
#include <asm/mc146818rtc.h>
#include <asm/system.h>
@@ -1176,6 +1177,190 @@ int arch_set_info_guest(
#undef c
}
+/* Called by VCPUOP_initialise for HVM guests. */
+int arch_set_info_hvm_guest(struct vcpu *v, vcpu_hvm_context_t *ctx)
+{
+ struct cpu_user_regs *uregs = &v->arch.user_regs;
+ struct segment_register cs, ds, ss, es, tr;
+
+ switch ( ctx->mode )
+ {
+ default:
+ return -EINVAL;
+
+ case VCPU_HVM_MODE_32B:
+ {
+ const struct vcpu_hvm_x86_32 *regs = &ctx->cpu_regs.x86_32;
+ uint32_t limit;
+
+#define SEG(s, r) \
+ (struct segment_register){ .sel = 0, .base = (r)->s ## _base, \
+ .limit = (r)->s ## _limit, .attr.bytes = (r)->s ## _ar }
+ cs = SEG(cs, regs);
+ ds = SEG(ds, regs);
+ ss = SEG(ss, regs);
+ es = SEG(es, regs);
+ tr = SEG(tr, regs);
+#undef SEG
+
+ /* Basic sanity checks. */
+ if ( cs.attr.fields.pad != 0 || ds.attr.fields.pad != 0 ||
+ ss.attr.fields.pad != 0 || es.attr.fields.pad != 0 ||
+ tr.attr.fields.pad != 0 )
+ {
+ gprintk(XENLOG_ERR, "Attribute bits 12-15 of the segments are not null\n");
+ return -EINVAL;
+ }
+
+ limit = cs.limit * (cs.attr.fields.g ? PAGE_SIZE : 1);
+ if ( regs->eip > limit )
+ {
+ gprintk(XENLOG_ERR, "EIP address is outside of the CS limit\n");
+ return -EINVAL;
+ }
+
+ if ( ds.attr.fields.dpl > cs.attr.fields.dpl )
+ {
+ gprintk(XENLOG_ERR, "DPL of DS is greater than DPL of CS\n");
+ return -EINVAL;
+ }
+
+ if ( ss.attr.fields.dpl != cs.attr.fields.dpl )
+ {
+ gprintk(XENLOG_ERR, "DPL of SS is different than DPL of CS\n");
+ return -EINVAL;
+ }
+
+ if ( es.attr.fields.dpl > cs.attr.fields.dpl )
+ {
+ gprintk(XENLOG_ERR, "DPL of ES is greater than DPL of CS\n");
+ return -EINVAL;
+ }
+
+ if ( ((regs->efer & EFER_LMA) && !(regs->efer & EFER_LME)) ||
+ ((regs->efer & EFER_LME) && !(regs->efer & EFER_LMA)) )
+ {
+ gprintk(XENLOG_ERR, "EFER.LMA and EFER.LME must be both set\n");
+ return -EINVAL;
+ }
+
+ uregs->rax = regs->eax;
+ uregs->rcx = regs->ecx;
+ uregs->rdx = regs->edx;
+ uregs->rbx = regs->ebx;
+ uregs->rsp = regs->esp;
+ uregs->rbp = regs->ebp;
+ uregs->rsi = regs->esi;
+ uregs->rdi = regs->edi;
+ uregs->rip = regs->eip;
+ uregs->rflags = regs->eflags;
+
+ v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
+ v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
+ v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
+ v->arch.hvm_vcpu.guest_efer = regs->efer;
+ }
+ break;
+
+ case VCPU_HVM_MODE_64B:
+ {
+ const struct vcpu_hvm_x86_64 *regs = &ctx->cpu_regs.x86_64;
+
+ /* Basic sanity checks. */
+ if ( !is_canonical_address(regs->rip) )
+ {
+ gprintk(XENLOG_ERR, "RIP contains a non-canonical address\n");
+ return -EINVAL;
+ }
+
+ if ( !(regs->cr0 & X86_CR0_PG) )
+ {
+ gprintk(XENLOG_ERR, "CR0 doesn't have paging enabled\n");
+ return -EINVAL;
+ }
+
+ if ( !(regs->cr4 & X86_CR4_PAE) )
+ {
+ gprintk(XENLOG_ERR, "CR4 doesn't have PAE enabled\n");
+ return -EINVAL;
+ }
+
+ if ( (regs->efer & (EFER_LME | EFER_LMA)) != (EFER_LME | EFER_LMA) )
+ {
+ gprintk(XENLOG_ERR, "EFER doesn't have LME or LMA enabled\n");
+ return -EINVAL;
+ }
+
+ uregs->rax = regs->rax;
+ uregs->rcx = regs->rcx;
+ uregs->rdx = regs->rdx;
+ uregs->rbx = regs->rbx;
+ uregs->rsp = regs->rsp;
+ uregs->rbp = regs->rbp;
+ uregs->rsi = regs->rsi;
+ uregs->rdi = regs->rdi;
+ uregs->rip = regs->rip;
+ uregs->rflags = regs->rflags;
+
+ v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
+ v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
+ v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
+ v->arch.hvm_vcpu.guest_efer = regs->efer;
+
+#define SEG(b, l, a) \
+ (struct segment_register){ .sel = 0, .base = (b), .limit = (l), \
+ .attr.bytes = (a) }
+ cs = SEG(0, ~0u, 0xa9b); /* 64bit code segment. */
+ ds = ss = es = SEG(0, ~0u, 0xc93);
+ tr = SEG(0, 0x67, 0x8b); /* 64bit TSS (busy). */
+#undef SEG
+ }
+ break;
+
+ }
+
+ hvm_update_guest_cr(v, 0);
+ hvm_update_guest_cr(v, 3);
+ hvm_update_guest_cr(v, 4);
+ hvm_update_guest_efer(v);
+
+ if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
+ {
+ /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
+ struct page_info *page = get_page_from_gfn(v->domain,
+ v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
+ if ( !page )
+ {
+ gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n",
+ v->arch.hvm_vcpu.guest_cr[3]);
+ domain_crash(v->domain);
+ return -EINVAL;
+ }
+
+ v->arch.guest_table = pagetable_from_page(page);
+ }
+
+ hvm_set_segment_register(v, x86_seg_cs, &cs);
+ hvm_set_segment_register(v, x86_seg_ds, &ds);
+ hvm_set_segment_register(v, x86_seg_ss, &ss);
+ hvm_set_segment_register(v, x86_seg_es, &es);
+ hvm_set_segment_register(v, x86_seg_tr, &tr);
+
+ /* Sync AP's TSC with BSP's. */
+ v->arch.hvm_vcpu.cache_tsc_offset =
+ v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
+ hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+ v->domain->arch.hvm_domain.sync_tsc);
+
+ paging_update_paging_modes(v);
+
+ v->is_initialised = 1;
+ set_bit(_VPF_down, &v->pause_flags);
+
+ return 0;
+}
+
int arch_vcpu_reset(struct vcpu *v)
{
if ( is_pv_vcpu(v) )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b4d8475..3c890c1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4993,6 +4993,10 @@ static long hvm_vcpu_op(
case VCPUOP_stop_singleshot_timer:
case VCPUOP_register_vcpu_info:
case VCPUOP_register_vcpu_time_memory_area:
+ case VCPUOP_initialise:
+ case VCPUOP_up:
+ case VCPUOP_down:
+ case VCPUOP_is_up:
rc = do_vcpu_op(cmd, vcpuid, arg);
break;
default:
@@ -5051,6 +5055,10 @@ static long hvm_vcpu_op_compat32(
case VCPUOP_stop_singleshot_timer:
case VCPUOP_register_vcpu_info:
case VCPUOP_register_vcpu_time_memory_area:
+ case VCPUOP_initialise:
+ case VCPUOP_up:
+ case VCPUOP_down:
+ case VCPUOP_is_up:
rc = compat_vcpu_op(cmd, vcpuid, arg);
break;
default:
diff --git a/xen/common/compat/domain.c b/xen/common/compat/domain.c
index 5dc7d94..9cccef0 100644
--- a/xen/common/compat/domain.c
+++ b/xen/common/compat/domain.c
@@ -10,6 +10,9 @@
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <compat/vcpu.h>
+#ifdef CONFIG_X86
+#include <compat/hvm/hvm_vcpu.h>
+#endif
#define xen_vcpu_set_periodic_timer vcpu_set_periodic_timer
CHECK_vcpu_set_periodic_timer;
@@ -23,8 +26,43 @@ CHECK_SIZE_(struct, vcpu_info);
CHECK_vcpu_register_vcpu_info;
#undef xen_vcpu_register_vcpu_info
+#ifdef CONFIG_X86
+#define xen_vcpu_hvm_context vcpu_hvm_context
+#define xen_vcpu_hvm_x86_32 vcpu_hvm_x86_32
+#define xen_vcpu_hvm_x86_64 vcpu_hvm_x86_64
+CHECK_vcpu_hvm_context;
+#undef xen_vcpu_hvm_x86_64
+#undef xen_vcpu_hvm_x86_32
+#undef xen_vcpu_hvm_context
+#endif
+
extern vcpu_info_t dummy_vcpu_info;
+static int default_initialize_vcpu(struct vcpu *v,
+ XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+ struct compat_vcpu_guest_context *cmp_ctxt;
+ struct domain *d = v->domain;
+ int rc;
+
+ if ( (cmp_ctxt = xmalloc(struct compat_vcpu_guest_context)) == NULL )
+ return -ENOMEM;
+
+ if ( copy_from_guest(cmp_ctxt, arg, 1) )
+ {
+ xfree(cmp_ctxt);
+ return -EFAULT;
+ }
+
+ domain_lock(d);
+ rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, cmp_ctxt);
+ domain_unlock(d);
+
+ xfree(cmp_ctxt);
+
+ return rc;
+}
+
int compat_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct domain *d = current->domain;
@@ -38,33 +76,36 @@ int compat_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) ar
{
case VCPUOP_initialise:
{
- struct compat_vcpu_guest_context *cmp_ctxt;
-
if ( v->vcpu_info == &dummy_vcpu_info )
return -EINVAL;
- if ( (cmp_ctxt = xmalloc(struct compat_vcpu_guest_context)) == NULL )
+#if defined(CONFIG_X86)
+ if ( is_hvm_vcpu(v) )
{
- rc = -ENOMEM;
- break;
- }
+ struct vcpu_hvm_context hvm_ctx;
- if ( copy_from_guest(cmp_ctxt, arg, 1) )
+ if ( copy_from_guest(&hvm_ctx, arg, 1) )
+ return -EFAULT;
+
+ domain_lock(d);
+ rc = v->is_initialised ? -EEXIST :
+ arch_set_info_hvm_guest(v, &hvm_ctx);
+ domain_unlock(d);
+ }
+ else
{
- xfree(cmp_ctxt);
- rc = -EFAULT;
- break;
+ rc = default_initialize_vcpu(v, arg);
}
-
- domain_lock(d);
- rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, cmp_ctxt);
- domain_unlock(d);
+#elif defined(CONFIG_ARM)
+ rc = default_initialize_vcpu(v, arg);
+#else
+ #error Unsupported architecture
+#endif
if ( rc == -ERESTART )
rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iuh",
cmd, vcpuid, arg);
- xfree(cmp_ctxt);
break;
}
diff --git a/xen/common/domain.c b/xen/common/domain.c
index cec0dcf..33b67a6 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1207,11 +1207,35 @@ void unmap_vcpu_info(struct vcpu *v)
put_page_and_type(mfn_to_page(mfn));
}
+static int default_initialize_vcpu(struct vcpu *v,
+ XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+ struct vcpu_guest_context *ctxt;
+ struct domain *d = v->domain;
+ int rc;
+
+ if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
+ return -ENOMEM;
+
+ if ( copy_from_guest(ctxt, arg, 1) )
+ {
+ free_vcpu_guest_context(ctxt);
+ return -EFAULT;
+ }
+
+ domain_lock(d);
+ rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
+ domain_unlock(d);
+
+ free_vcpu_guest_context(ctxt);
+
+ return rc;
+}
+
long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct domain *d = current->domain;
struct vcpu *v;
- struct vcpu_guest_context *ctxt;
long rc = 0;
if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
@@ -1223,20 +1247,28 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
if ( v->vcpu_info == &dummy_vcpu_info )
return -EINVAL;
- if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
- return -ENOMEM;
-
- if ( copy_from_guest(ctxt, arg, 1) )
+#if defined(CONFIG_X86)
+ if ( is_hvm_vcpu(v) )
{
- free_vcpu_guest_context(ctxt);
- return -EFAULT;
- }
+ struct vcpu_hvm_context hvm_ctx;
- domain_lock(d);
- rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
- domain_unlock(d);
+ if ( copy_from_guest(&hvm_ctx, arg, 1) )
+ return -EFAULT;
- free_vcpu_guest_context(ctxt);
+ domain_lock(d);
+ rc = v->is_initialised ? -EEXIST :
+ arch_set_info_hvm_guest(v, &hvm_ctx);
+ domain_unlock(d);
+ }
+ else
+ {
+ rc = default_initialize_vcpu(v, arg);
+ }
+#elif defined(CONFIG_ARM)
+ rc = default_initialize_vcpu(v, arg);
+#else
+ #error Unsupported architecture
+#endif
if ( rc == -ERESTART )
rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iuh",
diff --git a/xen/include/Makefile b/xen/include/Makefile
index 6664107..301245a 100644
--- a/xen/include/Makefile
+++ b/xen/include/Makefile
@@ -26,6 +26,7 @@ headers-$(CONFIG_X86) += compat/arch-x86/pmu.h
headers-$(CONFIG_X86) += compat/arch-x86/xen-mca.h
headers-$(CONFIG_X86) += compat/arch-x86/xen.h
headers-$(CONFIG_X86) += compat/arch-x86/xen-$(compat-arch-y).h
+headers-$(CONFIG_X86) += compat/hvm/hvm_vcpu.h
headers-y += compat/arch-$(compat-arch-y).h compat/pmu.h compat/xlat.h
headers-$(FLASK_ENABLE) += compat/xsm/flask_op.h
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 84ae4c1..3ba7d37 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -10,6 +10,7 @@
#include <asm/mce.h>
#include <public/vcpu.h>
#include <public/hvm/hvm_info_table.h>
+#include <public/hvm/hvm_vcpu.h>
#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo)
#define is_pv_32bit_domain(d) ((d)->arch.is_32bit_pv)
@@ -599,6 +600,8 @@ static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
vfree(vgc);
}
+int arch_set_info_hvm_guest(struct vcpu *v, vcpu_hvm_context_t *ctx);
+
#endif /* __ASM_DOMAIN_H__ */
/*
diff --git a/xen/include/public/hvm/hvm_vcpu.h b/xen/include/public/hvm/hvm_vcpu.h
new file mode 100644
index 0000000..c841461
--- /dev/null
+++ b/xen/include/public/hvm/hvm_vcpu.h
@@ -0,0 +1,144 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2015, Roger Pau Monne <roger.pau@xxxxxxxxxx>
+ */
+
+#ifndef __XEN_PUBLIC_HVM_HVM_VCPU_H__
+#define __XEN_PUBLIC_HVM_HVM_VCPU_H__
+
+#include "../xen.h"
+
+struct vcpu_hvm_x86_32 {
+ uint32_t eax;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t ebx;
+ uint32_t esp;
+ uint32_t ebp;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t eip;
+ uint32_t eflags;
+
+ uint32_t cr0;
+ uint32_t cr3;
+ uint32_t cr4;
+
+ uint32_t pad1;
+
+ /*
+ * EFER should only be used to set the NXE bit (if required)
+ * when starting a vCPU in 32bit mode with paging enabled or
+ * to set the LME/LMA bits in order to start the vCPU in
+ * compatibility mode.
+ */
+ uint64_t efer;
+
+ uint32_t cs_base;
+ uint32_t ds_base;
+ uint32_t ss_base;
+ uint32_t es_base;
+ uint32_t tr_base;
+ uint32_t cs_limit;
+ uint32_t ds_limit;
+ uint32_t ss_limit;
+ uint32_t es_limit;
+ uint32_t tr_limit;
+ uint16_t cs_ar;
+ uint16_t ds_ar;
+ uint16_t ss_ar;
+ uint16_t es_ar;
+ uint16_t tr_ar;
+
+ uint16_t pad2[2];
+};
+
+/*
+ * The layout of the _ar fields of the segment registers is the
+ * following:
+ *
+ * Bits [0,3]: type (bits 40-43).
+ * Bit 4: s (descriptor type, bit 44).
+ * Bit [5,6]: dpl (descriptor privilege level, bits 45-46).
+ * Bit 7: p (segment-present, bit 47).
+ * Bit 8: avl (available for system software, bit 52).
+ * Bit 9: l (64-bit code segment, bit 53).
+ * Bit 10: db (meaning depends on the segment, bit 54).
+ * Bit 11: g (granularity, bit 55)
+ * Bits [12,15]: unused, must be blank.
+ *
+ * A more complete description of the meaning of these fields can be
+ * obtained from the Intel SDM, Volume 3, section 3.4.5.
+ */
+
+struct vcpu_hvm_x86_64 {
+ uint64_t rax;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rbx;
+ uint64_t rsp;
+ uint64_t rbp;
+ uint64_t rsi;
+ uint64_t rdi;
+ uint64_t rip;
+ uint64_t rflags;
+
+ uint64_t cr0;
+ uint64_t cr3;
+ uint64_t cr4;
+ uint64_t efer;
+
+ /*
+ * Using VCPU_HVM_MODE_64B implies that the vCPU is launched
+ * directly in long mode, so the cached parts of the segment
+ * registers get set to match that environment.
+ *
+ * If the user wants to launch the vCPU in compatibility mode
+ * the 32-bit structure should be used instead.
+ */
+};
+
+struct vcpu_hvm_context {
+#define VCPU_HVM_MODE_32B 0 /* 32bit fields of the structure will be used. */
+#define VCPU_HVM_MODE_64B 1 /* 64bit fields of the structure will be used. */
+ uint32_t mode;
+
+ uint32_t pad;
+
+ /* CPU registers. */
+ union {
+ struct vcpu_hvm_x86_32 x86_32;
+ struct vcpu_hvm_x86_64 x86_64;
+ } cpu_regs;
+};
+typedef struct vcpu_hvm_context vcpu_hvm_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_hvm_context_t);
+
+#endif /* __XEN_PUBLIC_HVM_HVM_VCPU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/public/vcpu.h b/xen/include/public/vcpu.h
index 898b89f..692b87a 100644
--- a/xen/include/public/vcpu.h
+++ b/xen/include/public/vcpu.h
@@ -41,8 +41,10 @@
* Initialise a VCPU. Each VCPU can be initialised only once. A
* newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
*
- * @extra_arg == pointer to vcpu_guest_context structure containing initial
- * state for the VCPU.
+ * @extra_arg == For PV or ARM guests this is a pointer to a vcpu_guest_context
+ * structure containing the initial state for the VCPU. For x86
+ * HVM based guests this is a pointer to a vcpu_hvm_context
+ * structure.
*/
#define VCPUOP_initialise 0
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 3795059..fda1137 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -56,6 +56,9 @@
? grant_entry_header grant_table.h
? grant_entry_v2 grant_table.h
? gnttab_swap_grant_ref grant_table.h
+? vcpu_hvm_context hvm/hvm_vcpu.h
+? vcpu_hvm_x86_32 hvm/hvm_vcpu.h
+? vcpu_hvm_x86_64 hvm/hvm_vcpu.h
? kexec_exec kexec.h
! kexec_image kexec.h
! kexec_range kexec.h
--
1.9.5 (Apple Git-50.3)
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |