[Xen-devel] [PATCH 09/10] x86/hvm: pkeys, add pkeys support for guest_walk_tables
This patch adds pkeys support for guest_walk_tables.

Signed-off-by: Huaitong Han <huaitong.han@xxxxxxxxx>

diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 773454d..7a7ae96 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -124,6 +124,46 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
     return map;
 }
 
+#if GUEST_PAGING_LEVELS >= 4
+uint32_t leaf_pte_pkeys_check(struct vcpu *vcpu, uint32_t pfec,
+                              uint32_t pte_access, uint32_t pte_pkeys)
+{
+    unsigned int pkru_ad, pkru_wd;
+    unsigned int ff, wf, uf, rsvdf, pkuf;
+
+    uf = pfec & PFEC_user_mode;
+    wf = pfec & PFEC_write_access;
+    rsvdf = pfec & PFEC_reserved_bit;
+    ff = pfec & PFEC_insn_fetch;
+    pkuf = pfec & PFEC_protection_key;
+
+    if ( !pkuf )
+        return 0;
+
+    /*
+     * PKU: an additional mechanism by which paging controls
+     * access to user-mode addresses based on the value in the
+     * PKRU register. A fault is considered a PKU violation if all
+     * of the following conditions are true:
+     * 1. CR4.PKE = 1.
+     * 2. EFER.LMA = 1.
+     * 3. The page is present with no reserved-bit violations.
+     * 4. The access is not an instruction fetch.
+     * 5. The access is to a user page.
+     * 6. PKRU.AD = 1,
+     *    or the access is a data write and PKRU.WD = 1
+     *    and either CR0.WP = 1 or it is a user access.
+     */
+    pkru_ad = READ_PKRU_AD(pte_pkeys);
+    pkru_wd = READ_PKRU_WD(pte_pkeys);
+    if ( hvm_pku_enabled(vcpu) && hvm_long_mode_enabled(vcpu) &&
+         !rsvdf && !ff && (pkru_ad ||
+         (pkru_wd && wf && (hvm_wp_enabled(vcpu) || uf))) )
+        return 1;
+
+    return 0;
+}
+#endif
 
 /* Walk the guest pagetables, after the manner of a hardware walker. */
 /* Because the walk is essentially random, it can cause a deadlock
@@ -141,6 +181,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
     guest_l3e_t *l3p = NULL;
     guest_l4e_t *l4p;
+    uint32_t pkeys;
 #endif
     uint32_t gflags, mflags, iflags, rc = 0;
     bool_t smep = 0, smap = 0;
@@ -225,6 +266,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
         goto out;
     /* Get the l3e and check its flags*/
     gw->l3e = l3p[guest_l3_table_offset(va)];
+    pkeys = guest_l3e_get_pkeys(gw->l3e);
     gflags = guest_l3e_get_flags(gw->l3e) ^ iflags;
     if ( !(gflags & _PAGE_PRESENT) ) {
         rc |= _PAGE_PRESENT;
@@ -234,6 +276,9 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
 
     pse1G = (gflags & _PAGE_PSE) && guest_supports_1G_superpages(v);
 
+    if ( pse1G && leaf_pte_pkeys_check(v, pfec, gflags, pkeys) )
+        rc |= _PAGE_PK_BIT;
+
     if ( pse1G )
     {
         /* Generate a fake l1 table entry so callers don't all
@@ -295,7 +340,6 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     gw->l2e = l2p[guest_l2_table_offset(va)];
 
 #endif /* All levels... */
-
     gflags = guest_l2e_get_flags(gw->l2e) ^ iflags;
     if ( !(gflags & _PAGE_PRESENT) ) {
         rc |= _PAGE_PRESENT;
@@ -305,6 +349,12 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
 
     pse2M = (gflags & _PAGE_PSE) && guest_supports_superpages(v);
 
+#if GUEST_PAGING_LEVELS >= 4
+    pkeys = guest_l2e_get_pkeys(gw->l2e);
+    if ( pse2M && leaf_pte_pkeys_check(v, pfec, gflags, pkeys) )
+        rc |= _PAGE_PK_BIT;
+#endif
+
     if ( pse2M )
     {
         /* Special case: this guest VA is in a PSE superpage, so there's
@@ -365,6 +415,11 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
             goto out;
         }
         rc |= ((gflags & mflags) ^ mflags);
+#if GUEST_PAGING_LEVELS >= 4
+        pkeys = guest_l1e_get_pkeys(gw->l1e);
+        if ( leaf_pte_pkeys_check(v, pfec, gflags, pkeys) )
+            rc |= _PAGE_PK_BIT;
+#endif
     }
 
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
index f8a0d76..1c0f050 100644
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -154,6 +154,17 @@ static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
 { return l4e_get_flags(gl4e); }
 #endif
 
+static inline u32 guest_l1e_get_pkeys(guest_l1e_t gl1e)
+{ return l1e_get_pkeys(gl1e); }
+static inline u32 guest_l2e_get_pkeys(guest_l2e_t gl2e)
+{ return l2e_get_pkeys(gl2e); }
+static inline u32 guest_l3e_get_pkeys(guest_l3e_t gl3e)
+{ return l3e_get_pkeys(gl3e); }
+#if GUEST_PAGING_LEVELS >= 4
+static inline u32 guest_l4e_get_pkeys(guest_l4e_t gl4e)
+{ return l4e_get_pkeys(gl4e); }
+#endif
+
 static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
 { return l1e_from_pfn(gfn_x(gfn), flags); }
 static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 68b216c..e421a9d 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -273,6 +273,8 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
 #define hvm_nx_enabled(v) \
     (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
+#define hvm_pku_enabled(v) \
+    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PKE))
 
 /* Can we use superpages in the HAP p2m table? */
 #define hvm_hap_has_1gb(d) \
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index 03418ba..7bb5d2d 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -146,6 +146,8 @@ typedef l4_pgentry_t root_pgentry_t;
 
 #define get_pte_pkeys(x) ((int)(get_pte_flags(x) >> _PAGE_PKEY_BIT0) & 0xF)
 
+#define _PAGE_PK_BIT (1U<<_PAGE_PKEY_BIT0)
+
 /* Bit 23 of a 24-bit flag mask. This corresponds to bit 63 of a pte.*/
 #define _PAGE_NX_BIT (1U<<23)
-- 
2.4.3
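
A note on the READ_PKRU_AD/READ_PKRU_WD macros used by leaf_pte_pkeys_check():
they are introduced by an earlier patch in this series, not here. Below is a
rough, self-contained sketch of the layout they rely on. The architectural
PKRU encoding gives each protection key k two bits, AD at bit 2k and WD at
bit 2k+1; guest_pkru and read_pkru() are stand-ins for however the hypervisor
fetches the guest's PKRU and are illustrative only, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for fetching the guest's 32-bit PKRU value (e.g. from xsave
 * state); hypothetical, not defined by this patch. */
static uint32_t guest_pkru;
static uint32_t read_pkru(void) { return guest_pkru; }

/* PKRU gives each protection key two bits: AD at bit 2k, WD at bit 2k+1. */
#define PKRU_ATTRS 2
#define PKRU_AD    0
#define PKRU_WD    1
#define READ_PKRU_AD(pkey) ((read_pkru() >> ((pkey) * PKRU_ATTRS + PKRU_AD)) & 1)
#define READ_PKRU_WD(pkey) ((read_pkru() >> ((pkey) * PKRU_ATTRS + PKRU_WD)) & 1)

int main(void)
{
    guest_pkru = 1u << (5 * PKRU_ATTRS + PKRU_WD);   /* set WD for key 5 */
    printf("key 5: AD=%u WD=%u\n", READ_PKRU_AD(5), READ_PKRU_WD(5)); /* AD=0 WD=1 */
    printf("key 4: AD=%u WD=%u\n", READ_PKRU_AD(4), READ_PKRU_WD(4)); /* AD=0 WD=0 */
    return 0;
}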
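
The six conditions listed in the leaf_pte_pkeys_check() comment can also be
read as a pure predicate. The following is a hypothetical standalone model of
that decision, with every piece of vcpu state passed as an explicit flag
instead of being read from Xen structures; the function name and parameters
are illustrative, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/* Model of the PKU-violation decision: conditions 1-4 and 6 of the comment.
 * Condition 5 (the access targets a user page) is assumed by the caller. */
static bool pkey_fault(bool pke, bool lma, bool wp,          /* CR4.PKE, EFER.LMA, CR0.WP */
                       bool rsvdf, bool ff, bool uf, bool wf, /* fault: rsvd, fetch, user, write */
                       bool ad, bool wd)                      /* PKRU bits for the page's key */
{
    if ( !pke || !lma )    /* conditions 1 and 2 */
        return false;
    if ( rsvdf || ff )     /* conditions 3 and 4 */
        return false;
    /* condition 6: AD always faults; WD faults writes when WP=1 or user access */
    return ad || (wd && wf && (wp || uf));
}

int main(void)
{
    /* Supervisor write, key's WD set, CR0.WP=1: faults (prints 1). */
    printf("%d\n", pkey_fault(true, true, true, false, false, false, true, false, true));
    /* Same write with CR0.WP=0: no protection-key fault (prints 0). */
    printf("%d\n", pkey_fault(true, true, false, false, false, false, true, false, true));
    /* Instruction fetch is never a PKU violation (prints 0). */
    printf("%d\n", pkey_fault(true, true, true, false, true, true, false, true, true));
    return 0;
}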
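
Likewise, get_pte_pkeys() depends on _PAGE_PKEY_BIT0, which is defined
earlier in the series. Architecturally, a 64-bit leaf PTE carries its
protection key in bits 62:59; here is a minimal sketch that pulls the key
straight out of a raw PTE, with constants that are illustrative rather than
Xen's own.

#include <stdint.h>
#include <stdio.h>

/* Protection key of a leaf PTE: the 4-bit field in PTE bits 62:59. */
#define PTE_PKEY_SHIFT 59
#define PTE_PKEY_MASK  0xFULL

static unsigned int pte_pkey(uint64_t pte)
{
    return (unsigned int)((pte >> PTE_PKEY_SHIFT) & PTE_PKEY_MASK);
}

int main(void)
{
    /* Key 10, with typical low flags set (P, RW, US, A, D). */
    uint64_t pte = (0xAULL << PTE_PKEY_SHIFT) | 0x67;
    printf("pkey = %u\n", pte_pkey(pte));   /* prints 10 */
    return 0;
}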