Re: [Xen-devel] [V2 PATCH 7/9] x86/hvm: pkeys, add pkeys support for guest_walk_tables
On 27/11/15 09:52, Huaitong Han wrote:
> This patch adds pkeys support for guest_walk_tables.
>
> Signed-off-by: Huaitong Han <huaitong.han@xxxxxxxxx>
You must CC the appropriate maintainer for this patch, which includes
the x86 MM maintainer.
> ---
> xen/arch/x86/mm/guest_walk.c  | 65 +++++++++++++++++++++++++++++++++++++++++++
> xen/include/asm-x86/hvm/hvm.h | 2 ++
> 2 files changed, 67 insertions(+)
>
> diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
> index 18d1acf..3e443b3 100644
> --- a/xen/arch/x86/mm/guest_walk.c
> +++ b/xen/arch/x86/mm/guest_walk.c
> @@ -31,6 +31,8 @@ asm(".file \"" __OBJECT_FILE__ "\"");
> #include <xen/sched.h>
> #include <asm/page.h>
> #include <asm/guest_pt.h>
> +#include <asm/xstate.h>
> +#include <asm/i387.h>
I can see why you need xstate.h, but why do you need i387.h?
>
> extern const uint32_t gw_page_flags[];
> #if GUEST_PAGING_LEVELS == CONFIG_PAGING_LEVELS
> @@ -90,6 +92,53 @@ static uint32_t set_ad_bits(void *guest_p, void *walk_p, int set_dirty)
> return 0;
> }
>
> +#if GUEST_PAGING_LEVELS >= 4
> +uint32_t leaf_pte_pkeys_check(struct vcpu *vcpu, uint32_t pfec,
> + uint32_t pte_access, uint32_t pte_pkeys)
This is a latent linking bug for the future, when 5-level paging comes along.
It will probably be best to use the same trick as gw_page_flags: compile the
function once, but use it from every set of paging levels that needs it.
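For reference, the gw_page_flags trick (visible in the context above) is to
guard the definition so that only one of the several compilations of
guest_walk.c actually emits the symbol, while every compilation can still
reference it.  Roughly (a simplified sketch, not the exact upstream code):

    /* guest_walk.c is compiled once per GUEST_PAGING_LEVELS value.  Declare
     * the symbol in every compilation, but only emit the definition in the
     * build whose level count matches CONFIG_PAGING_LEVELS, so it links
     * exactly once. */
    extern const uint32_t gw_page_flags[];

    #if GUEST_PAGING_LEVELS == CONFIG_PAGING_LEVELS
    const uint32_t gw_page_flags[] = {
        /* ... table contents ... */
    };
    #endif

Doing the same for leaf_pte_pkeys_check() would avoid defining it twice once a
5-level variant of this file exists.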
> +{
> + bool_t pkru_ad, pkru_wd;
> + bool_t ff, wf, uf, rsvdf, pkuf;
> + unsigned int pkru = 0;
> +
> + uf = pfec & PFEC_user_mode;
> + wf = pfec & PFEC_write_access;
> + rsvdf = pfec & PFEC_reserved_bit;
> + ff = pfec & PFEC_insn_fetch;
> + pkuf = pfec & PFEC_prot_key;
> +
> + if ( !cpu_has_xsave || !pkuf || is_pv_vcpu(vcpu) )
> + return 0;
> +
> + vcpu_save_fpu(vcpu);
> + pkru = *(unsigned int*)get_xsave_addr(vcpu->arch.xsave_area,
> XSTATE_PKRU);
Style.
> + if ( unlikely(pkru) )
> + {
> + /*
> + * PKU: additional mechanism by which the paging controls
> + * access to user-mode addresses based on the value in the
> + * PKRU register. A fault is considered as a PKU violation if all
> + * of the following conditions are true:
> + * 1.CR4_PKE=1.
> + * 2.EFER_LMA=1.
> + * 3.page is present with no reserved bit violations.
> + * 4.the access is not an instruction fetch.
> + * 5.the access is to a user page.
> + * 6.PKRU.AD=1
> + * or the access is a data write and PKRU.WD=1
> + * and either CR0.WP=1 or it is a user access.
> + */
> + pkru_ad = READ_PKRU_AD(pkru, pte_pkeys);
> + pkru_wd = READ_PKRU_WD(pkru, pte_pkeys);
> + if ( hvm_pku_enabled(vcpu) && hvm_long_mode_enabled(vcpu) &&
> + !rsvdf && !ff && (pkru_ad ||
> + (pkru_wd && wf && (hvm_wp_enabled(vcpu) || uf))))
> + return 1;
> + }
> +
> + return 0;
> +}
> +#endif
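For those following along: the READ_PKRU_AD()/READ_PKRU_WD() helpers used above
are introduced elsewhere in this series.  The hardware layout they have to
match gives each protection key two bits in PKRU, so they presumably amount to
something like this (my reconstruction for illustration, not the patch's code):

    /* PKRU holds two bits per protection key: Access-Disable at bit 2*key
     * and Write-Disable at bit 2*key + 1. */
    #define READ_PKRU_AD(pkru, key) (((pkru) >> ((key) * 2)) & 1)
    #define READ_PKRU_WD(pkru, key) (((pkru) >> ((key) * 2 + 1)) & 1)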
> +
> /* Walk the guest pagetables, after the manner of a hardware walker. */
> /* Because the walk is essentially random, it can cause a deadlock
> * warning in the p2m locking code. Highly unlikely this is an actual
> @@ -106,6 +155,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
> #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
> guest_l3e_t *l3p = NULL;
> guest_l4e_t *l4p;
> + uint32_t pkeys;
> #endif
> uint32_t gflags, mflags, iflags, rc = 0;
> bool_t smep = 0, smap = 0;
> @@ -190,6 +240,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
> goto out;
> /* Get the l3e and check its flags*/
> gw->l3e = l3p[guest_l3_table_offset(va)];
> + pkeys = guest_l3e_get_pkeys(gw->l3e);
> gflags = guest_l3e_get_flags(gw->l3e) ^ iflags;
> if ( !(gflags & _PAGE_PRESENT) ) {
> rc |= _PAGE_PRESENT;
> @@ -199,6 +250,9 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
>
> pse1G = (gflags & _PAGE_PSE) && guest_supports_1G_superpages(v);
>
> + if (pse1G && leaf_pte_pkeys_check(v, pfec, gflags, pkeys))
> + rc |= _PAGE_PKEY_BIT;
> +
> if ( pse1G )
> {
> /* Generate a fake l1 table entry so callers don't all
> @@ -270,6 +324,12 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
>
> pse2M = (gflags & _PAGE_PSE) && guest_supports_superpages(v);
>
> +#if GUEST_PAGING_LEVELS >= 4
> + pkeys = guest_l2e_get_pkeys(gw->l2e);
> + if (pse2M && leaf_pte_pkeys_check(v, pfec, gflags, pkeys))
> + rc |= _PAGE_PKEY_BIT;
> +#endif
> +
> if ( pse2M )
> {
> /* Special case: this guest VA is in a PSE superpage, so there's
> @@ -330,6 +390,11 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
> goto out;
> }
> rc |= ((gflags & mflags) ^ mflags);
> +#if GUEST_PAGING_LEVELS >= 4
> + pkeys = guest_l1e_get_pkeys(gw->l1e);
> + if (leaf_pte_pkeys_check(v, pfec, gflags, pkeys))
> + rc |= _PAGE_PKEY_BIT;
> +#endif
As I identified in v1, the fact that you do not modify any callers of
guest_walk_tables() proves that this change is buggy.  You must modify the
callers to cope with the new _PAGE_PKEY_BIT error.
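To illustrate what that means (a sketch only, modelled on the existing
missing-bit handling in the ga-to-gfn paths; the surrounding context is an
assumption, not the actual call sites):

    /* In each caller, translate the new walk-result bit into the
     * architectural page fault error code, alongside the existing
     * _PAGE_PRESENT / _PAGE_INVALID_BITS handling. */
    missing = guest_walk_tables(v, p2m, ga, &gw, *pfec, top_mfn, top_map);

    if ( missing & _PAGE_INVALID_BITS )
        pfec[0] |= PFEC_reserved_bit;
    if ( missing & _PAGE_PKEY_BIT )
        pfec[0] |= PFEC_prot_key;   /* report the protection-key violation */

PFEC_prot_key and _PAGE_PKEY_BIT are both introduced by this series.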
~Andrew