
Re: [Xen-devel] [PATCH v7 3/4] x86/mem_event: Deliver gla fault EPT violation information



> From: Tamas K Lengyel [mailto:tamas.lengyel@xxxxxxxxxxxx]
> Sent: Tuesday, August 12, 2014 4:30 AM
> 
> On Intel EPT the exit qualification generated by a violation also includes
> a bit (EPT_GLA_FAULT) which describes the following information:
> Set if the access causing the EPT violation is to a guest-physical address
> that is the translation of a linear address. Clear if the access causing
> the EPT violation is to a paging-structure entry as part of a page walk or
> the update of an accessed or dirty bit.
> 
> For more information see Table 27-7 in the Intel SDM.
> 
> This patch extends the mem_event system to deliver this extra information,
> which could be useful for determining the cause of a violation.
> 
> v7: Minor fix of setting the mem event response values twice and style
> changes of SVM bit declarations.
> v6: Fixes regarding the enum usage.
> v5: Add missing bits to the SVM side, style fixes and switching to shared
> struct+enum in mm.h.
> v4: Use new bitmaps to pass information.
> v3: Style fixes.
> v2: Split gla_fault into fault_in_gpt and fault_gla to be more compatible with
> the AMD implementation.
> 
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>

Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>

> ---
>  xen/arch/x86/hvm/svm/svm.c        |  8 +++++++-
>  xen/arch/x86/hvm/vmx/vmx.c        |  4 ++++
>  xen/arch/x86/mm/p2m.c             |  5 ++++-
>  xen/include/asm-x86/hvm/svm/svm.h |  6 ++++++
>  xen/include/asm-x86/mm.h          | 11 +++++++++++
>  xen/include/public/mem_event.h    |  4 +++-
>  6 files changed, 35 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 9531248..860df89 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1394,7 +1394,7 @@ const struct hvm_function_table * __init start_svm(void)
>  }
> 
>  static void svm_do_nested_pgfault(struct vcpu *v,
> -    struct cpu_user_regs *regs, uint32_t pfec, paddr_t gpa)
> +    struct cpu_user_regs *regs, uint64_t pfec, paddr_t gpa)
>  {
>      int ret;
>      unsigned long gfn = gpa >> PAGE_SHIFT;
> @@ -1409,6 +1409,12 @@ static void svm_do_nested_pgfault(struct vcpu *v,
>          .insn_fetch = !!(pfec & PFEC_insn_fetch)
>      };
> 
> +    /* These bits are mutually exclusive */
> +    if ( pfec & NPT_PFEC_with_gla )
> +        npfec.npfec_kind = npfec_kind_with_gla;
> +    else if ( pfec & NPT_PFEC_in_gpt )
> +        npfec.npfec_kind = npfec_kind_in_gpt;
> +
>      ret = hvm_hap_nested_page_fault(gpa, ~0ul, npfec);
> 
>      if ( tb_init_done )
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index af0ad7c..df687a6 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2380,6 +2380,10 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
>      {
>          __vmread(GUEST_LINEAR_ADDRESS, &gla);
>          npfec.gla_valid = 1;
> +        if( qualification & EPT_GLA_FAULT )
> +            npfec.npfec_kind = npfec_kind_with_gla;
> +        else
> +            npfec.npfec_kind = npfec_kind_in_gpt;
>      }
>      else
>          gla = ~0ull;
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 1f1f6cd..5815372 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1405,10 +1405,13 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
>          req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
>          req->gla_valid = npfec.gla_valid;
>          req->gla = gla;
> +        if ( npfec.npfec_kind == npfec_kind_with_gla )
> +            req->fault_with_gla = 1;
> +        else if ( npfec.npfec_kind == npfec_kind_in_gpt )
> +            req->fault_in_gpt = 1;
>          req->access_r = npfec.read_access;
>          req->access_w = npfec.write_access;
>          req->access_x = npfec.insn_fetch;
> -
>          req->vcpu_id = v->vcpu_id;
>      }
> 
> diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
> index 1ffe6d6..cb2db3f 100644
> --- a/xen/include/asm-x86/hvm/svm/svm.h
> +++ b/xen/include/asm-x86/hvm/svm/svm.h
> @@ -105,4 +105,10 @@ extern u32 svm_feature_flags;
>  extern void svm_host_osvw_reset(void);
>  extern void svm_host_osvw_init(void);
> 
> +/* EXITINFO1 fields on NPT faults */
> +#define _NPT_PFEC_with_gla     32
> +#define NPT_PFEC_with_gla      (1UL<<_NPT_PFEC_with_gla)
> +#define _NPT_PFEC_in_gpt       33
> +#define NPT_PFEC_in_gpt        (1UL<<_NPT_PFEC_in_gpt)
> +
>  #endif /* __ASM_X86_HVM_SVM_H__ */
> diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
> index 1889b25..7261db7 100644
> --- a/xen/include/asm-x86/mm.h
> +++ b/xen/include/asm-x86/mm.h
> @@ -552,6 +552,16 @@ void audit_domains(void);
>  #endif
> 
>  /*
> + * Extra fault info types which are used to further describe
> + * the source of an access violation.
> + */
> +typedef enum {
> +    npfec_kind_unknown, /* must be first */
> +    npfec_kind_in_gpt,  /* violation in guest page table */
> +    npfec_kind_with_gla /* violation with guest linear address */
> +} npfec_kind_t;
> +
> +/*
>   * Nested page fault exception codes.
>   */
>  struct npfec {
> @@ -559,6 +569,7 @@ struct npfec {
>      unsigned int write_access:1;
>      unsigned int insn_fetch:1;
>      unsigned int gla_valid:1;
> +    unsigned int npfec_kind:2;  /* npfec_kind_t */
>  };
> 
>  int new_guest_cr3(unsigned long pfn);
> diff --git a/xen/include/public/mem_event.h b/xen/include/public/mem_event.h
> index 3831b41..fc12697 100644
> --- a/xen/include/public/mem_event.h
> +++ b/xen/include/public/mem_event.h
> @@ -62,7 +62,9 @@ typedef struct mem_event_st {
>      uint16_t access_w:1;
>      uint16_t access_x:1;
>      uint16_t gla_valid:1;
> -    uint16_t available:12;
> +    uint16_t fault_with_gla:1;
> +    uint16_t fault_in_gpt:1;
> +    uint16_t available:10;
> 
>      uint16_t reason;
>  } mem_event_request_t, mem_event_response_t;
> --
> 2.0.1
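
For reference, here is a minimal consumer-side sketch of how a mem_event
listener might interpret the two new request bits. The helper name, the
printf-based reporting and the include lines are illustrative assumptions
(modelled on how an external mem_event consumer is typically built); only
the bitfield names come from the patch above.

    /* Illustrative sketch only: assumes the mem_event_request_t layout from
     * xen/include/public/mem_event.h with this patch applied, and that the
     * public headers are installed under <xen/...> for a tools build. */
    #include <stdio.h>
    #include <inttypes.h>
    #include <xenctrl.h>
    #include <xen/mem_event.h>

    static void report_violation_kind(const mem_event_request_t *req)
    {
        if ( req->gla_valid )
            printf("gla 0x%" PRIx64 ": ", (uint64_t)req->gla);

        /* fault_with_gla and fault_in_gpt are mutually exclusive,
         * mirroring npfec_kind_t on the hypervisor side. */
        if ( req->fault_with_gla )
            printf("access to the translation of a linear address\n");
        else if ( req->fault_in_gpt )
            printf("access during a guest page-table walk or A/D bit update\n");
        else
            printf("no further information (npfec_kind_unknown)\n");
    }

Since npfec_kind_unknown leaves both bits clear, a consumer can still tell
when the hypervisor had no extra information to report for a given fault.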


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel