[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 03/11] nEPT: Implement guest ept's walker
From: Zhang Xiantao <xiantao.zhang@xxxxxxxxx> Implement guest EPT PT walker, some logic is based on shadow's ia32e PT walker. During the PT walking, if the target pages are not in memory, use RETRY mechanism and get a chance to let the target page back. Signed-off-by: Zhang Xiantao <xiantao.zhang@xxxxxxxxx> --- xen/arch/x86/hvm/hvm.c | 1 + xen/arch/x86/hvm/vmx/vvmx.c | 42 +++++- xen/arch/x86/mm/guest_walk.c | 12 +- xen/arch/x86/mm/hap/Makefile | 1 + xen/arch/x86/mm/hap/nested_ept.c | 327 +++++++++++++++++++++++++++++++++++ xen/arch/x86/mm/hap/nested_hap.c | 2 +- xen/arch/x86/mm/shadow/multi.c | 2 +- xen/include/asm-x86/guest_pt.h | 8 + xen/include/asm-x86/hvm/nestedhvm.h | 1 + xen/include/asm-x86/hvm/vmx/vmcs.h | 1 + xen/include/asm-x86/hvm/vmx/vvmx.h | 14 ++ 11 files changed, 403 insertions(+), 8 deletions(-) create mode 100644 xen/arch/x86/mm/hap/nested_ept.c diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 85bc9be..3400e6b 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -1324,6 +1324,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, access_r, access_w, access_x); switch (rv) { case NESTEDHVM_PAGEFAULT_DONE: + case NESTEDHVM_PAGEFAULT_RETRY: return 1; case NESTEDHVM_PAGEFAULT_L1_ERROR: /* An error occured while translating gpa from diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c index 4495dd6..76cf757 100644 --- a/xen/arch/x86/hvm/vmx/vvmx.c +++ b/xen/arch/x86/hvm/vmx/vvmx.c @@ -906,9 +906,18 @@ static void sync_vvmcs_ro(struct vcpu *v) { int i; struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); + struct nestedvmx *nvmx = &vcpu_2_nvmx(v); + void *vvmcs = nvcpu->nv_vvmcx; for ( i = 0; i < ARRAY_SIZE(vmcs_ro_field); i++ ) shadow_to_vvmcs(nvcpu->nv_vvmcx, vmcs_ro_field[i]); + + /* Adjust exit_reason/exit_qualification for violation case */ + if ( __get_vvmcs(vvmcs, VM_EXIT_REASON) == + EXIT_REASON_EPT_VIOLATION ) { + __set_vvmcs(vvmcs, EXIT_QUALIFICATION, nvmx->ept_exit.exit_qual); + __set_vvmcs(vvmcs, 
VM_EXIT_REASON, nvmx->ept_exit.exit_reason); + } } static void load_vvmcs_host_state(struct vcpu *v) @@ -1454,8 +1463,37 @@ nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order, bool_t access_r, bool_t access_w, bool_t access_x) { - /*TODO:*/ - return 0; + uint64_t exit_qual = __vmread(EXIT_QUALIFICATION); + uint32_t exit_reason = EXIT_REASON_EPT_VIOLATION; + int rc; + unsigned long gfn; + uint32_t rwx_rights = (access_x << 2) | (access_w << 1) | access_r; + struct nestedvmx *nvmx = &vcpu_2_nvmx(v); + + rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn, + &exit_qual, &exit_reason); + switch ( rc ) { + case EPT_TRANSLATE_SUCCEED: + *L1_gpa = (gfn << PAGE_SHIFT) + (L2_gpa & ~PAGE_MASK); + rc = NESTEDHVM_PAGEFAULT_DONE; + break; + case EPT_TRANSLATE_VIOLATION: + case EPT_TRANSLATE_MISCONFIG: + rc = NESTEDHVM_PAGEFAULT_INJECT; + nvmx->ept_exit.exit_reason = exit_reason; + nvmx->ept_exit.exit_qual = exit_qual; + break; + case EPT_TRANSLATE_RETRY: + rc = NESTEDHVM_PAGEFAULT_RETRY; + break; + case EPT_TRANSLATE_ERR_PAGE: + rc = NESTEDHVM_PAGEFAULT_L1_ERROR; + break; + default: + gdprintk(XENLOG_ERR, "GUEST EPT translation error!\n"); + } + + return rc; } void nvmx_idtv_handling(void) diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c index 13ea0bb..afbe9db 100644 --- a/xen/arch/x86/mm/guest_walk.c +++ b/xen/arch/x86/mm/guest_walk.c @@ -88,10 +88,11 @@ static uint32_t set_ad_bits(void *guest_p, void *walk_p, int set_dirty) /* If the map is non-NULL, we leave this function having * acquired an extra ref on mfn_to_page(*mfn) */ -static inline void *map_domain_gfn(struct p2m_domain *p2m, +void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn, p2m_type_t *p2mt, + p2m_query_t *q, uint32_t *rc) { struct page_info *page; @@ -99,7 +100,7 @@ static inline void *map_domain_gfn(struct p2m_domain *p2m, /* Translate the gfn, unsharing if shared */ page = get_page_from_gfn_p2m(p2m->domain, p2m, 
gfn_x(gfn), p2mt, NULL, - P2M_ALLOC | P2M_UNSHARE); + *q); if ( p2m_is_paging(*p2mt) ) { ASSERT(!p2m_is_nestedp2m(p2m)); @@ -128,7 +129,6 @@ static inline void *map_domain_gfn(struct p2m_domain *p2m, return map; } - /* Walk the guest pagetables, after the manner of a hardware walker. */ /* Because the walk is essentially random, it can cause a deadlock * warning in the p2m locking code. Highly unlikely this is an actual @@ -149,6 +149,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, uint32_t gflags, mflags, iflags, rc = 0; int smep; bool_t pse1G = 0, pse2M = 0; + p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE; perfc_incr(guest_walk); memset(gw, 0, sizeof(*gw)); @@ -188,7 +189,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, l3p = map_domain_gfn(p2m, guest_l4e_get_gfn(gw->l4e), &gw->l3mfn, - &p2mt, + &p2mt, + &qt, &rc); if(l3p == NULL) goto out; @@ -249,6 +251,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, guest_l3e_get_gfn(gw->l3e), &gw->l2mfn, &p2mt, + &qt, &rc); if(l2p == NULL) goto out; @@ -322,6 +325,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, guest_l2e_get_gfn(gw->l2e), &gw->l1mfn, &p2mt, + &qt, &rc); if(l1p == NULL) goto out; diff --git a/xen/arch/x86/mm/hap/Makefile b/xen/arch/x86/mm/hap/Makefile index 80a6bec..68f2bb5 100644 --- a/xen/arch/x86/mm/hap/Makefile +++ b/xen/arch/x86/mm/hap/Makefile @@ -3,6 +3,7 @@ obj-y += guest_walk_2level.o obj-y += guest_walk_3level.o obj-$(x86_64) += guest_walk_4level.o obj-y += nested_hap.o +obj-y += nested_ept.o guest_walk_%level.o: guest_walk.c Makefile $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@ diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c new file mode 100644 index 0000000..da868e7 --- /dev/null +++ b/xen/arch/x86/mm/hap/nested_ept.c @@ -0,0 +1,327 @@ +/* + * nested_ept.c: Handling virtualized EPT for guest in nested case. 
+ * + * pt walker logic based on arch/x86/mm/guest_walk.c + * Copyright (c) 2012, Intel Corporation + * Xiantao Zhang <xiantao.zhang@xxxxxxxxx> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + */ +#include <asm/domain.h> +#include <asm/page.h> +#include <asm/paging.h> +#include <asm/p2m.h> +#include <asm/mem_event.h> +#include <public/mem_event.h> +#include <asm/mem_sharing.h> +#include <xen/event.h> +#include <asm/hap.h> +#include <asm/hvm/support.h> + +#include <asm/hvm/nestedhvm.h> + +#include "private.h" + +#include <asm/hvm/vmx/vmx.h> +#include <asm/hvm/vmx/vvmx.h> + +/* EPT always use 4-level paging structure*/ +#define GUEST_PAGING_LEVELS 4 +#include <asm/guest_pt.h> + +/* For EPT's walker reserved bits and EMT check */ +#define EPT_MUST_RSV_BITS (((1ull << PADDR_BITS) -1) & \ + ~((1ull << paddr_bits) - 1)) + + +#define EPT_EMT_WB 6 +#define EPT_EMT_UC 0 + +#define NEPT_VPID_CAP_BITS 0 + +#define NEPT_1G_ENTRY_FLAG (1 << 11) +#define NEPT_2M_ENTRY_FLAG (1 << 10) +#define NEPT_4K_ENTRY_FLAG (1 << 9) + +/* Always expose 1G and 2M capability to guest, + so don't need additional check */ +bool_t nept_sp_entry(uint64_t entry) +{ + return !!(entry & EPTE_SUPER_PAGE_MASK); +} + +static bool_t nept_rsv_bits_check(uint64_t entry, uint32_t level) +{ + uint64_t rsv_bits = EPT_MUST_RSV_BITS; + + switch ( level ){ + case 1: + break; + case 2 ... 
3: + if (nept_sp_entry(entry)) + rsv_bits |= ((1ull << (9 * (level -1 ))) -1) << PAGE_SHIFT; + else + rsv_bits |= 0xfull << 3; + break; + case 4: + rsv_bits |= 0xf8; + break; + default: + printk("Unsupported EPT paging level: %d\n", level); + } + if ( ((entry & rsv_bits) ^ rsv_bits) == rsv_bits ) + return 0; + return 1; +} + +/* EMT checking*/ +static bool_t nept_emt_bits_check(uint64_t entry, uint32_t level) +{ + ept_entry_t e; + e.epte = entry; + if ( e.sp || level == 1 ) { + if ( e.emt == 2 || e.emt == 3 || e.emt == 7 ) + return 1; + } + return 0; +} + +static bool_t nept_rwx_bits_check(uint64_t entry) { + /*write only or write/execute only*/ + uint8_t rwx_bits = entry & 0x7; + + if ( rwx_bits == 2 || rwx_bits == 6) + return 1; + if ( rwx_bits == 4 && !(NEPT_VPID_CAP_BITS & + VMX_EPT_EXEC_ONLY_SUPPORTED)) + return 1; + return 0; +} + +/* nept's misconfiguration check */ +static bool_t nept_misconfiguration_check(uint64_t entry, uint32_t level) +{ + return (nept_rsv_bits_check(entry, level) || + nept_emt_bits_check(entry, level) || + nept_rwx_bits_check(entry)); +} + +static bool_t nept_present_check(uint64_t entry) +{ + if (entry & 0x7) + return 1; + return 0; +} + +uint64_t nept_get_ept_vpid_cap(void) +{ + /*TODO: exposed ept and vpid features*/ + return NEPT_VPID_CAP_BITS; +} + +static uint32_t +nept_walk_tables(struct vcpu *v, unsigned long l2ga, walk_t *gw) +{ + p2m_type_t p2mt; + uint32_t rc = 0, ret = 0, gflags; + struct domain *d = v->domain; + struct p2m_domain *p2m = d->arch.p2m; + gfn_t base_gfn = _gfn(nhvm_vcpu_p2m_base(v) >> PAGE_SHIFT); + p2m_query_t qt = P2M_ALLOC; + + guest_l1e_t *l1p = NULL; + guest_l2e_t *l2p = NULL; + guest_l3e_t *l3p = NULL; + guest_l4e_t *l4p = NULL; + + bool_t sp= 0; + + memset(gw, 0, sizeof(*gw)); + gw->va = l2ga; + + /* Map the l4 root entry */ + l4p = map_domain_gfn(p2m, base_gfn, &gw->l4mfn, &p2mt, &qt, &rc); + if ( !l4p ) + goto map_err; + gw->l4e = l4p[guest_l4_table_offset(l2ga)]; + if 
(!nept_present_check(gw->l4e.l4)) + goto non_present; + if (nept_misconfiguration_check(gw->l4e.l4, 4)) + goto misconfig_err; + + /* Map the l3 table */ + base_gfn = guest_l4e_get_gfn(gw->l4e); + l3p = map_domain_gfn(p2m, base_gfn, &gw->l3mfn, &p2mt, &qt, &rc); + if( l3p == NULL ) + goto map_err; + + /* Get the l3e and check its flags*/ + gw->l3e = l3p[guest_l3_table_offset(l2ga)]; + if ( !nept_present_check(gw->l3e.l3) ) + goto non_present; + if ( nept_misconfiguration_check(gw->l3e.l3, 3) ) + goto misconfig_err; + + sp = nept_sp_entry(gw->l3e.l3); + /* Super 1G entry */ + if ( sp ) + { + /* Generate a fake l1 table entry so callers don't all + * have to understand superpages. */ + gfn_t start = guest_l3e_get_gfn(gw->l3e); + + /* Increment the pfn by the right number of 4k pages. */ + start = _gfn((gfn_x(start) & ~GUEST_L3_GFN_MASK) + + ((l2ga >> PAGE_SHIFT) & GUEST_L3_GFN_MASK)); + gflags = (gw->l3e.l3 & 0x7f) | NEPT_1G_ENTRY_FLAG; + gw->l1e = guest_l1e_from_gfn(start, gflags); + gw->l2mfn = gw->l1mfn = _mfn(INVALID_MFN); + goto done; + } + + /* Map the l2 table */ + base_gfn = guest_l3e_get_gfn(gw->l3e); + l2p = map_domain_gfn(p2m, base_gfn, &gw->l2mfn, &p2mt, &qt, &rc); + if( l2p == NULL ) + goto map_err; + /* Get the l2e */ + gw->l2e = l2p[guest_l2_table_offset(l2ga)]; + if ( !nept_present_check(gw->l2e.l2) ) + goto non_present; + if ( nept_misconfiguration_check(gw->l2e.l2, 2) ) + goto misconfig_err; + sp = nept_sp_entry(gw->l2e.l2); + + if ( sp ) + { + gfn_t start = guest_l2e_get_gfn(gw->l2e); + gflags = (gw->l2e.l2 & 0x7f) | NEPT_2M_ENTRY_FLAG; + + /* Increment the pfn by the right number of 4k pages.*/ + start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) + + guest_l1_table_offset(l2ga)); + gw->l1e = guest_l1e_from_gfn(start, gflags); + gw->l1mfn = _mfn(INVALID_MFN); + goto done; + } + /* Not a superpage: carry on and find the l1e. 
*/ + base_gfn = guest_l2e_get_gfn(gw->l2e); + l1p = map_domain_gfn(p2m, base_gfn, &gw->l1mfn, &p2mt, &qt, &rc); + if( l1p == NULL ) + goto map_err; + /* Get the l1e */ + gw->l1e = l1p[guest_l1_table_offset(l2ga)]; + if ( !nept_present_check(gw->l1e.l1) ) + goto non_present; + if ( nept_misconfiguration_check(gw->l1e.l1, 1) ) + goto misconfig_err; + + gflags = (gw->l1e.l1 & 0x7f) | NEPT_4K_ENTRY_FLAG; + gw->l1e.l1 = (gw->l1e.l1 & PAGE_MASK) | gflags; + +done: + ret = EPT_TRANSLATE_SUCCEED; + goto unmap; + +misconfig_err: + ret = EPT_TRANSLATE_MISCONFIG; + goto unmap; + +map_err: + if ( rc == _PAGE_PAGED ) + ret = EPT_TRANSLATE_RETRY; + else + ret = EPT_TRANSLATE_ERR_PAGE; + goto unmap; + +non_present: + ret = EPT_TRANSLATE_VIOLATION; + +unmap: + if ( l4p ) + { + unmap_domain_page(l4p); + put_page(mfn_to_page(mfn_x(gw->l4mfn))); + } + if ( l3p ) + { + unmap_domain_page(l3p); + put_page(mfn_to_page(mfn_x(gw->l3mfn))); + } + if ( l2p ) + { + unmap_domain_page(l2p); + put_page(mfn_to_page(mfn_x(gw->l2mfn))); + } + if ( l1p ) + { + unmap_domain_page(l1p); + put_page(mfn_to_page(mfn_x(gw->l1mfn))); + } + return ret; +} + +/* Translate a L2 guest address to L1 gpa via L1 EPT paging structure */ + +int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga, + unsigned int *page_order, uint32_t rwx_acc, + unsigned long *l1gfn, uint64_t *exit_qual, + uint32_t *exit_reason) +{ + uint32_t rc, rwx_bits = 0; + walk_t gw; + + *l1gfn = INVALID_GFN; + + rc = nept_walk_tables(v, l2ga, &gw); + switch ( rc ) { + case EPT_TRANSLATE_SUCCEED: + if ( likely(gw.l1e.l1 & NEPT_2M_ENTRY_FLAG) ) + { + rwx_bits = gw.l4e.l4 & gw.l3e.l3 & gw.l2e.l2 & 0x7; + *page_order = 9; + } + else if ( gw.l1e.l1 & NEPT_4K_ENTRY_FLAG ) { + rwx_bits = gw.l4e.l4 & gw.l3e.l3 & gw.l2e.l2 & gw.l1e.l1 & 0x7; + *page_order = 0; + } + else if ( gw.l1e.l1 & NEPT_1G_ENTRY_FLAG ) + { + rwx_bits = gw.l4e.l4 & gw.l3e.l3 & 0x7; + *page_order = 18; + } + else + gdprintk(XENLOG_ERR, "Uncorrect l1 entry!\n"); + + *l1gfn = 
guest_l1e_get_paddr(gw.l1e) >> PAGE_SHIFT; + break; + case EPT_TRANSLATE_VIOLATION: + *exit_qual = (*exit_qual & 0xffffffc0) | (rwx_bits << 3) | rwx_acc; + *exit_reason = EXIT_REASON_EPT_VIOLATION; + break; + + case EPT_TRANSLATE_ERR_PAGE: + break; + case EPT_TRANSLATE_MISCONFIG: + rc = EPT_TRANSLATE_MISCONFIG; + *exit_qual = 0; + *exit_reason = EXIT_REASON_EPT_MISCONFIG; + break; + case EPT_TRANSLATE_RETRY: + break; + default: + gdprintk(XENLOG_ERR, "Unsupported ept translation type!:%d\n", rc); + } + return rc; +} diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c index 8787c91..6d1264b 100644 --- a/xen/arch/x86/mm/hap/nested_hap.c +++ b/xen/arch/x86/mm/hap/nested_hap.c @@ -217,7 +217,7 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa, /* let caller to handle these two cases */ switch (rv) { case NESTEDHVM_PAGEFAULT_INJECT: - return rv; + case NESTEDHVM_PAGEFAULT_RETRY: case NESTEDHVM_PAGEFAULT_L1_ERROR: return rv; case NESTEDHVM_PAGEFAULT_DONE: diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c index 4967da1..409198c 100644 --- a/xen/arch/x86/mm/shadow/multi.c +++ b/xen/arch/x86/mm/shadow/multi.c @@ -4582,7 +4582,7 @@ static mfn_t emulate_gva_to_mfn(struct vcpu *v, /* Translate the GFN to an MFN */ ASSERT(!paging_locked_by_me(v->domain)); mfn = get_gfn(v->domain, _gfn(gfn), &p2mt); - + if ( p2m_is_readonly(p2mt) ) { put_gfn(v->domain, gfn); diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h index 4e1dda0..600c52d 100644 --- a/xen/include/asm-x86/guest_pt.h +++ b/xen/include/asm-x86/guest_pt.h @@ -315,6 +315,14 @@ guest_walk_to_page_order(walk_t *gw) #define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels #define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l) #define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS) +#define map_domain_gfn GPT_RENAME(map_domain_gfn, GUEST_PAGING_LEVELS) + +extern void *map_domain_gfn(struct p2m_domain *p2m, + gfn_t gfn, + 
mfn_t *mfn, + p2m_type_t *p2mt, + p2m_query_t *q, + uint32_t *rc); extern uint32_t guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, unsigned long va, diff --git a/xen/include/asm-x86/hvm/nestedhvm.h b/xen/include/asm-x86/hvm/nestedhvm.h index 91fde0b..649c511 100644 --- a/xen/include/asm-x86/hvm/nestedhvm.h +++ b/xen/include/asm-x86/hvm/nestedhvm.h @@ -52,6 +52,7 @@ bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v); #define NESTEDHVM_PAGEFAULT_L1_ERROR 2 #define NESTEDHVM_PAGEFAULT_L0_ERROR 3 #define NESTEDHVM_PAGEFAULT_MMIO 4 +#define NESTEDHVM_PAGEFAULT_RETRY 5 int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa, bool_t access_r, bool_t access_w, bool_t access_x); diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index ef2c9c9..9a728b6 100644 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -194,6 +194,7 @@ extern u32 vmx_secondary_exec_control; extern bool_t cpu_has_vmx_ins_outs_instr_info; +#define VMX_EPT_EXEC_ONLY_SUPPORTED 0x00000001 #define VMX_EPT_WALK_LENGTH_4_SUPPORTED 0x00000040 #define VMX_EPT_MEMORY_TYPE_UC 0x00000100 #define VMX_EPT_MEMORY_TYPE_WB 0x00004000 diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h index 422f006..8eb377b 100644 --- a/xen/include/asm-x86/hvm/vmx/vvmx.h +++ b/xen/include/asm-x86/hvm/vmx/vvmx.h @@ -32,6 +32,10 @@ struct nestedvmx { unsigned long intr_info; u32 error_code; } intr; + struct { + uint32_t exit_reason; + uint32_t exit_qual; + } ept_exit; }; #define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx) @@ -109,6 +113,12 @@ void nvmx_domain_relinquish_resources(struct domain *d); int nvmx_handle_vmxon(struct cpu_user_regs *regs); int nvmx_handle_vmxoff(struct cpu_user_regs *regs); +#define EPT_TRANSLATE_SUCCEED 0 +#define EPT_TRANSLATE_VIOLATION 1 +#define EPT_TRANSLATE_ERR_PAGE 2 +#define EPT_TRANSLATE_MISCONFIG 3 +#define EPT_TRANSLATE_RETRY 4 + int nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t 
L2_gpa, paddr_t *L1_gpa, unsigned int *page_order, @@ -192,5 +202,9 @@ u64 nvmx_get_tsc_offset(struct vcpu *v); int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, unsigned int exit_reason); +int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga, + unsigned int *page_order, uint32_t rwx_acc, + unsigned long *l1gfn, uint64_t *exit_qual, + uint32_t *exit_reason); #endif /* __ASM_X86_HVM_VVMX_H__ */ -- 1.7.1 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |