
Re: [Xen-devel] [PATCH v6 1/4] x86: Consolidate boolean inputs in hvm and p2m into a shared bitmap.



> From: Tamas K Lengyel [mailto:tamas.lengyel@xxxxxxxxxxxx]
> Sent: Monday, August 11, 2014 7:49 AM
> 
> This patch consolidates the boolean input parameters of
> hvm_hap_nested_page_fault and p2m_mem_access_check into a common
> bitmap and defines the bitmap members accordingly.
> 
> v6: Rename shared structure to "struct npfec" and style fixes.
> v5: Shared structure in mm.h, style fixes, and gla fault type additions
>     moved into the next patch in the series.
> 
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>

Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
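
For anyone skimming the series, here is a minimal standalone sketch of the
pattern the patch adopts (illustrative code, not Xen source; the names and
values are made up): the loose bool_t parameters become a single bitmap
struct that each fault handler fills in by name and passes by value.

#include <stdio.h>

/* Mirrors the struct npfec added to xen/include/asm-x86/mm.h below. */
struct npfec {
    unsigned int read_access:1;
    unsigned int write_access:1;
    unsigned int insn_fetch:1;
    unsigned int gla_valid:1;
};

/* Stand-in for hvm_hap_nested_page_fault(): the bitmap travels by value,
 * so the call stays as cheap as the old list of bool_t arguments. */
static int handle_fault(unsigned long gpa, unsigned long gla,
                        struct npfec npfec)
{
    printf("gpa=%#lx gla=%#lx r=%u w=%u x=%u gla_valid=%u\n",
           gpa, gla, npfec.read_access, npfec.write_access,
           npfec.insn_fetch, npfec.gla_valid);
    return 0;
}

int main(void)
{
    /* Designated initializers name each bit at the call site -- the
     * readability win over a positional (r, w, x) boolean list. */
    struct npfec npfec = {
        .read_access = 1,    /* e.g. SVM counts every NPF as a read */
        .write_access = 1,
    };
    return handle_fault(0x10000ul, ~0ul, npfec);
}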

> ---
>  xen/arch/x86/hvm/hvm.c        | 49 ++++++++++++++++++++++---------------------
>  xen/arch/x86/hvm/svm/svm.c    | 15 +++++++------
>  xen/arch/x86/hvm/vmx/vmx.c    | 15 ++++++++-----
>  xen/arch/x86/mm/p2m.c         | 18 ++++++++--------
>  xen/include/asm-x86/hvm/hvm.h |  7 ++-----
>  xen/include/asm-x86/mm.h      | 10 +++++++++
>  xen/include/asm-x86/p2m.h     |  6 +++---
>  7 files changed, 68 insertions(+), 52 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index e834406..94a6836 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2722,12 +2722,8 @@ void hvm_inject_page_fault(int errcode, unsigned long cr2)
>      hvm_inject_trap(&trap);
>  }
> 
> -int hvm_hap_nested_page_fault(paddr_t gpa,
> -                              bool_t gla_valid,
> -                              unsigned long gla,
> -                              bool_t access_r,
> -                              bool_t access_w,
> -                              bool_t access_x)
> +int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
> +                              struct npfec npfec)
>  {
>      unsigned long gfn = gpa >> PAGE_SHIFT;
>      p2m_type_t p2mt;
> @@ -2756,8 +2752,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
>           * into l1 guest if not fixable. The algorithm is
>           * the same as for shadow paging.
>           */
> -        rv = nestedhvm_hap_nested_page_fault(v, &gpa,
> -                                             access_r, access_w, access_x);
> +
> +        rv = nestedhvm_hap_nested_page_fault(v, &gpa,
> +                                             npfec.read_access,
> +                                             npfec.write_access,
> +                                             npfec.insn_fetch);
>          switch (rv) {
>          case NESTEDHVM_PAGEFAULT_DONE:
>          case NESTEDHVM_PAGEFAULT_RETRY:
> @@ -2793,47 +2792,49 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
> 
>      p2m = p2m_get_hostp2m(v->domain);
>      mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
> -                              P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL);
> +                              P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
> +                              NULL);
> 
>      /* Check access permissions first, then handle faults */
>      if ( mfn_x(mfn) != INVALID_MFN )
>      {
> -        int violation = 0;
> +        bool_t violation;
> +
>          /* If the access is against the permissions, then send to mem_event */
> -        switch (p2ma)
> +        switch ( p2ma )
>          {
>          case p2m_access_n:
>          case p2m_access_n2rwx:
>          default:
> -            violation = access_r || access_w || access_x;
> +            violation = npfec.read_access || npfec.write_access ||
> +                        npfec.insn_fetch;
>              break;
>          case p2m_access_r:
> -            violation = access_w || access_x;
> +            violation = npfec.write_access || npfec.insn_fetch;
>              break;
>          case p2m_access_w:
> -            violation = access_r || access_x;
> +            violation = npfec.read_access || npfec.insn_fetch;
>              break;
>          case p2m_access_x:
> -            violation = access_r || access_w;
> +            violation = npfec.read_access || npfec.write_access;
>              break;
>          case p2m_access_rx:
>          case p2m_access_rx2rw:
> -            violation = access_w;
> +            violation = npfec.write_access;
>              break;
>          case p2m_access_wx:
> -            violation = access_r;
> +            violation = npfec.read_access;
>              break;
>          case p2m_access_rw:
> -            violation = access_x;
> +            violation = npfec.insn_fetch;
>              break;
>          case p2m_access_rwx:
> +            violation = 0;
>              break;
>          }
> 
>          if ( violation )
>          {
> -            if ( p2m_mem_access_check(gpa, gla_valid, gla, access_r,
> -                                        access_w, access_x, &req_ptr) )
> +            if ( p2m_mem_access_check(gpa, gla, npfec, &req_ptr) )
>              {
>                  fall_through = 1;
>              } else {
> @@ -2849,7 +2850,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
>       * to the mmio handler.
>       */
>      if ( (p2mt == p2m_mmio_dm) ||
> -         (access_w && (p2mt == p2m_ram_ro)) )
> +         (npfec.write_access && (p2mt == p2m_ram_ro)) )
>      {
>          put_gfn(p2m->domain, gfn);
> 
> @@ -2868,7 +2869,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
>          paged = 1;
> 
>      /* Mem sharing: unshare the page and try again */
> -    if ( access_w && (p2mt == p2m_ram_shared) )
> +    if ( npfec.write_access && (p2mt == p2m_ram_shared) )
>      {
>          ASSERT(!p2m_is_nestedp2m(p2m));
>          sharing_enomem =
> @@ -2885,7 +2886,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
>           * a large page, we do not change other pages type within that large
>           * page.
>           */
> -        if ( access_w )
> +        if ( npfec.write_access )
>          {
>              paging_mark_dirty(v->domain, mfn_x(mfn));
>              p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
> @@ -2895,7 +2896,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
>      }
> 
>      /* Shouldn't happen: Maybe the guest was writing to a r/o grant mapping? */
> -    if ( access_w && (p2mt == p2m_grant_map_ro) )
> +    if ( npfec.write_access && (p2mt == p2m_grant_map_ro) )
>      {
>          gdprintk(XENLOG_WARNING,
>                   "trying to write to read-only grant mapping\n");
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 76616ac..1f72e19 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1394,7 +1394,7 @@ const struct hvm_function_table * __init start_svm(void)
>  }
> 
>  static void svm_do_nested_pgfault(struct vcpu *v,
> -    struct cpu_user_regs *regs, uint32_t npfec, paddr_t gpa)
> +    struct cpu_user_regs *regs, uint32_t pfec, paddr_t gpa)
>  {
>      int ret;
>      unsigned long gfn = gpa >> PAGE_SHIFT;
> @@ -1403,10 +1403,13 @@ static void svm_do_nested_pgfault(struct vcpu *v,
>      p2m_access_t p2ma;
>      struct p2m_domain *p2m = NULL;
> 
> -    ret = hvm_hap_nested_page_fault(gpa, 0, ~0ul,
> -                                    1, /* All NPFs count as reads */
> -                                    npfec & PFEC_write_access,
> -                                    npfec & PFEC_insn_fetch);
> +    struct npfec npfec = {
> +        .read_access = 1, /* All NPFs count as reads */
> +        .write_access = !!(pfec & PFEC_write_access),
> +        .insn_fetch = !!(pfec & PFEC_insn_fetch)
> +    };
> +
> +    ret = hvm_hap_nested_page_fault(gpa, ~0ul, npfec);
> 
>      if ( tb_init_done )
>      {
> @@ -1434,7 +1437,7 @@ static void svm_do_nested_pgfault(struct vcpu *v,
>      case -1:
>          ASSERT(nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v));
>          /* inject #VMEXIT(NPF) into guest. */
> -        nestedsvm_vmexit_defer(v, VMEXIT_NPF, npfec, gpa);
> +        nestedsvm_vmexit_defer(v, VMEXIT_NPF, pfec, gpa);
>          return;
>      }
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 2caa04a..656ce61 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2353,6 +2353,11 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
>      p2m_type_t p2mt;
>      int ret;
>      struct domain *d = current->domain;
> +    struct npfec npfec = {
> +        .read_access = !!(qualification & EPT_READ_VIOLATION),
> +        .write_access = !!(qualification & EPT_WRITE_VIOLATION),
> +        .insn_fetch = !!(qualification & EPT_EXEC_VIOLATION)
> +    };
> 
>      if ( tb_init_done )
>      {
> @@ -2371,14 +2376,14 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
>      }
> 
>      if ( qualification & EPT_GLA_VALID )
> +    {
>          __vmread(GUEST_LINEAR_ADDRESS, &gla);
> +        npfec.gla_valid = 1;
> +    }
>      else
>          gla = ~0ull;
> -    ret = hvm_hap_nested_page_fault(gpa,
> -                                    !!(qualification & EPT_GLA_VALID), gla,
> -                                    !!(qualification & EPT_READ_VIOLATION),
> -                                    !!(qualification & EPT_WRITE_VIOLATION),
> -                                    !!(qualification & EPT_EXEC_VIOLATION));
> +
> +    ret = hvm_hap_nested_page_fault(gpa, gla, npfec);
>      switch ( ret )
>      {
>      case 0:         // Unhandled L1 EPT violation
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index bca9f0f..1f1f6cd 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1323,9 +1323,9 @@ void p2m_mem_paging_resume(struct domain *d)
>      }
>  }
> 
> -bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
> -                          bool_t access_r, bool_t access_w, bool_t access_x,
> -                          mem_event_request_t **req_ptr)
> +bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> +                            struct npfec npfec,
> +                            mem_event_request_t **req_ptr)
>  {
>      struct vcpu *v = current;
>      unsigned long gfn = gpa >> PAGE_SHIFT;
> @@ -1343,7 +1343,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
>      gfn_lock(p2m, gfn, 0);
>      mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
> 
> -    if ( access_w && p2ma == p2m_access_rx2rw )
> +    if ( npfec.write_access && p2ma == p2m_access_rx2rw )
>      {
>          rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
>          ASSERT(rc == 0);
> @@ -1352,7 +1352,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
>      else if ( p2ma == p2m_access_n2rwx )
>      {
> -        ASSERT(access_w || access_r || access_x);
> +        ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch);
>          rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
>                              p2mt, p2m_access_rwx);
>          ASSERT(rc == 0);
> @@ -1403,11 +1403,11 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
>          /* Send request to mem event */
>          req->gfn = gfn;
>          req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
> -        req->gla_valid = gla_valid;
> +        req->gla_valid = npfec.gla_valid;
>          req->gla = gla;
> -        req->access_r = access_r;
> -        req->access_w = access_w;
> -        req->access_x = access_x;
> +        req->access_r = npfec.read_access;
> +        req->access_w = npfec.write_access;
> +        req->access_x = npfec.insn_fetch;
> 
>          req->vcpu_id = v->vcpu_id;
>      }
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 0ebd478..1123857 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -455,11 +455,8 @@ static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
>  #endif
>  }
> 
> -int hvm_hap_nested_page_fault(paddr_t gpa,
> -                              bool_t gla_valid, unsigned long gla,
> -                              bool_t access_r,
> -                              bool_t access_w,
> -                              bool_t access_x);
> +int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
> +                              struct npfec npfec);
> 
>  #define hvm_msr_tsc_aux(v) ({                                               \
>      struct domain *__d = (v)->domain;                                       \
> diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
> index d253117..1889b25 100644
> --- a/xen/include/asm-x86/mm.h
> +++ b/xen/include/asm-x86/mm.h
> @@ -551,6 +551,16 @@ void audit_domains(void);
> 
>  #endif
> 
> +/*
> + * Nested page fault exception codes.
> + */
> +struct npfec {
> +    unsigned int read_access:1;
> +    unsigned int write_access:1;
> +    unsigned int insn_fetch:1;
> +    unsigned int gla_valid:1;
> +};
> +
>  int new_guest_cr3(unsigned long pfn);
>  void make_cr3(struct vcpu *v, unsigned long mfn);
>  void update_cr3(struct vcpu *v);
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 0ddbadb..3975e32 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -597,9 +597,9 @@ void p2m_mem_paging_resume(struct domain *d);
>   * been promoted with no underlying vcpu pause. If the req_ptr has been populated,
>   * then the caller must put the event in the ring (once having released get_gfn*
>   * locks -- caller must also xfree the request. */
> -bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
> -                          bool_t access_r, bool_t access_w, bool_t access_x,
> -                          mem_event_request_t **req_ptr);
> +bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> +                            struct npfec npfec,
> +                            mem_event_request_t **req_ptr);
>  /* Resumes the running of the VCPU, restarting the last instruction */
>  void p2m_mem_access_resume(struct domain *d);
> 
> --
> 2.0.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel