
Re: [Xen-devel] [PATCH V13 1/7] xen/arm: p2m changes for mem_access support



On Wed, Mar 11, 2015 at 5:07 PM, Stefano Stabellini
<stefano.stabellini@xxxxxxxxxxxxx> wrote:
> On Fri, 6 Mar 2015, Tamas K Lengyel wrote:
>> Add necessary changes for page table construction routines to pass
>> the default access information. We store the p2m_access_t info in a
>> Radix tree as the PTE lacks enough software programmable bits.
>>
>> Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
>> ---
>> v13: - Rename access_in_use to mem_access_enabled.
>>      - Define p2m_get_mem_access function prototype but
>>         return -ENOSYS for now.
>> v11: - Move including common/mem_event.h down the series.
>> v10: - Typo fix and drop reshuffling things that no longer need
>>       shuffling.
>> v8: - Drop lock inputs as common mem_access_check is postponed.
>>     - Resurrect the radix tree with an extra boolean access_in_use flag
>>       to indicate if the tree is empty to avoid lookups.
>> v7: - Remove radix tree init/destroy and move p2m_access_t store to page_info.
>>     - Add p2m_gpfn_lock/unlock functions.
>>     - Add bool_t lock input to p2m_lookup and apply_p2m_changes so the caller
>>       can specify if locking should be performed. This is needed in order to
>>       support mem_access_check from common.
>> v6: - Move mem_event header include to first patch that needs it.
>> v5: - #include grouping style-fix.
>> v4: - Move p2m_get_hostp2m definition here.
>> ---
>>  xen/arch/arm/p2m.c              | 52 +++++++++++++++++++++++++++--------------
>>  xen/include/asm-arm/domain.h    |  1 +
>>  xen/include/asm-arm/p2m.h       | 35 ++++++++++++++++++++++++++-
>>  xen/include/asm-arm/processor.h |  2 +-
>>  4 files changed, 70 insertions(+), 20 deletions(-)
>>
>> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
>> index 8809f5a..137e5a0 100644
>> --- a/xen/arch/arm/p2m.c
>> +++ b/xen/arch/arm/p2m.c
>> @@ -305,7 +305,7 @@ static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
>>  }
>>
>>  static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
>> -                               p2m_type_t t)
>> +                               p2m_type_t t, p2m_access_t a)
>>  {
>>      paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
>>      /* sh, xn and write bit will be defined in the following switches
>> @@ -335,8 +335,7 @@ static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
>>          break;
>>      }
>>
>> -    /* We pass p2m_access_rwx as a placeholder for now. */
>> -    p2m_set_permission(&e, t, p2m_access_rwx);
>> +    p2m_set_permission(&e, t, a);
>>
>>      ASSERT(!(pa & ~PAGE_MASK));
>>      ASSERT(!(pa & ~PADDR_MASK));
>> @@ -394,7 +393,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
>>           for ( i=0 ; i < LPAE_ENTRIES; i++ )
>>           {
>>              pte = mfn_to_p2m_entry(base_pfn + (i<<(level_shift-LPAE_SHIFT)),
>> -                                    MATTR_MEM, t);
>> +                                    MATTR_MEM, t, p2m->default_access);
>>
>>               /*
>>                * First and second level super pages set p2m.table = 0, but
>> @@ -414,7 +413,8 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
>>
>>      unmap_domain_page(p);
>>
>> -    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid);
>> +    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid,
>> +                           p2m->default_access);
>>
>>      p2m_write_pte(entry, pte, flush_cache);
>>
>> @@ -537,7 +537,8 @@ static int apply_one_level(struct domain *d,
>>                             paddr_t *maddr,
>>                             bool_t *flush,
>>                             int mattr,
>> -                           p2m_type_t t)
>> +                           p2m_type_t t,
>> +                           p2m_access_t a)
>>  {
>>      const paddr_t level_size = level_sizes[level];
>>      const paddr_t level_mask = level_masks[level];
>> @@ -566,7 +567,7 @@ static int apply_one_level(struct domain *d,
>>              page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
>>              if ( page )
>>              {
>> -                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t);
>> +                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
>>                  if ( level < 3 )
>>                      pte.p2m.table = 0;
>>                  p2m_write_pte(entry, pte, flush_cache);
>> @@ -601,7 +602,7 @@ static int apply_one_level(struct domain *d,
>>               (level == 3 || !p2m_table(orig_pte)) )
>>          {
>>              /* New mapping is superpage aligned, make it */
>> -            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t);
>> +            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
>>              if ( level < 3 )
>>                  pte.p2m.table = 0; /* Superpage entry */
>>
>> @@ -770,7 +771,9 @@ static int apply_p2m_changes(struct domain *d,
>>                       paddr_t end_gpaddr,
>>                       paddr_t maddr,
>>                       int mattr,
>> -                     p2m_type_t t)
>> +                     uint32_t mask,
>
> What is this for? It is not used and you always pass 0 here and in a
> later patch ~0.

It is used by the hypercall preemption mechanism. See
common/mem_access.c:105 for more info.
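
Roughly: the memop cmd carries the sub-op in its low MEMOP_CMD_MASK
bits, so the number of pages already processed can be stashed in the
bits above the mask, and the p2m side may only yield at mask-aligned
points. A simplified sketch of both halves (names as in the common
code, details may differ):

    /* Caller (common/mem_access.c): the resume point arrives in the
     * upper bits of cmd. */
    unsigned long start_iter = cmd & ~MEMOP_CMD_MASK;

    rc = p2m_set_mem_access(d, mao.pfn, mao.nr, start_iter,
                            MEMOP_CMD_MASK, mao.access);
    if ( rc > 0 )
    {
        /* Partial progress: rc pages done, encode that in the cmd and
         * arrange for the hypercall to be restarted. */
        ASSERT(!(rc & MEMOP_CMD_MASK));
        rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh",
                                           XENMEM_access_op | rc, arg);
    }

    /* p2m side: check for preemption, but only where the progress
     * count has no bits inside the mask. */
    if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
        return start;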

>
>> +                     p2m_type_t t,
>> +                     p2m_access_t a)
>>  {
>>      int rc, ret;
>>      struct p2m_domain *p2m = &d->arch.p2m;
>> @@ -863,7 +866,7 @@ static int apply_p2m_changes(struct domain *d,
>>                                    level, flush_pt, op,
>>                                    start_gpaddr, end_gpaddr,
>>                                    &addr, &maddr, &flush,
>> -                                  mattr, t);
>> +                                  mattr, t, a);
>>              if ( ret < 0 ) { rc = ret ; goto out; }
>>              count += ret;
>>              /* L3 had better have done something! We cannot descend any further */
>> @@ -921,7 +924,7 @@ out:
>>           */
>>          apply_p2m_changes(d, REMOVE,
>>                            start_gpaddr, addr + level_sizes[level], orig_maddr,
>> -                          mattr, p2m_invalid);
>> +                          mattr, 0, p2m_invalid, d->arch.p2m.default_access);
>>      }
>>
>>      for ( level = P2M_ROOT_LEVEL; level < 4; level ++ )
>> @@ -940,7 +943,8 @@ int p2m_populate_ram(struct domain *d,
>>                       paddr_t end)
>>  {
>>      return apply_p2m_changes(d, ALLOCATE, start, end,
>> -                             0, MATTR_MEM, p2m_ram_rw);
>> +                             0, MATTR_MEM, 0, p2m_ram_rw,
>> +                             d->arch.p2m.default_access);
>>  }
>>
>>  int map_mmio_regions(struct domain *d,
>> @@ -952,7 +956,8 @@ int map_mmio_regions(struct domain *d,
>>                               pfn_to_paddr(start_gfn),
>>                               pfn_to_paddr(start_gfn + nr),
>>                               pfn_to_paddr(mfn),
>> -                             MATTR_DEV, p2m_mmio_direct);
>> +                             MATTR_DEV, 0, p2m_mmio_direct,
>> +                             d->arch.p2m.default_access);
>>  }
>>
>>  int unmap_mmio_regions(struct domain *d,
>> @@ -964,7 +969,8 @@ int unmap_mmio_regions(struct domain *d,
>>                               pfn_to_paddr(start_gfn),
>>                               pfn_to_paddr(start_gfn + nr),
>>                               pfn_to_paddr(mfn),
>> -                             MATTR_DEV, p2m_invalid);
>> +                             MATTR_DEV, 0, p2m_invalid,
>> +                             d->arch.p2m.default_access);
>>  }
>>
>>  int guest_physmap_add_entry(struct domain *d,
>> @@ -976,7 +982,8 @@ int guest_physmap_add_entry(struct domain *d,
>>      return apply_p2m_changes(d, INSERT,
>>                               pfn_to_paddr(gpfn),
>>                               pfn_to_paddr(gpfn + (1 << page_order)),
>> -                             pfn_to_paddr(mfn), MATTR_MEM, t);
>> +                             pfn_to_paddr(mfn), MATTR_MEM, 0, t,
>> +                             d->arch.p2m.default_access);
>>  }
>>
>>  void guest_physmap_remove_page(struct domain *d,
>> @@ -986,7 +993,8 @@ void guest_physmap_remove_page(struct domain *d,
>>      apply_p2m_changes(d, REMOVE,
>>                        pfn_to_paddr(gpfn),
>>                        pfn_to_paddr(gpfn + (1<<page_order)),
>> -                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
>> +                      pfn_to_paddr(mfn), MATTR_MEM, 0, p2m_invalid,
>> +                      d->arch.p2m.default_access);
>>  }
>>
>>  int p2m_alloc_table(struct domain *d)
>> @@ -1090,6 +1098,8 @@ void p2m_teardown(struct domain *d)
>>
>>      p2m_free_vmid(d);
>>
>> +    radix_tree_destroy(&p2m->mem_access_settings, NULL);
>> +
>>      spin_unlock(&p2m->lock);
>>  }
>>
>> @@ -1115,6 +1125,10 @@ int p2m_init(struct domain *d)
>>      p2m->max_mapped_gfn = 0;
>>      p2m->lowest_mapped_gfn = ULONG_MAX;
>>
>> +    p2m->default_access = p2m_access_rwx;
>> +    p2m->mem_access_enabled = false;
>> +    radix_tree_init(&p2m->mem_access_settings);
>> +
>>  err:
>>      spin_unlock(&p2m->lock);
>>
>> @@ -1129,7 +1143,8 @@ int relinquish_p2m_mapping(struct domain *d)
>>                                pfn_to_paddr(p2m->lowest_mapped_gfn),
>>                                pfn_to_paddr(p2m->max_mapped_gfn),
>>                                pfn_to_paddr(INVALID_MFN),
>> -                              MATTR_MEM, p2m_invalid);
>> +                              MATTR_MEM, 0, p2m_invalid,
>> +                              d->arch.p2m.default_access);
>>  }
>>
>>  int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
>> @@ -1143,7 +1158,8 @@ int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
>>                               pfn_to_paddr(start_mfn),
>>                               pfn_to_paddr(end_mfn),
>>                               pfn_to_paddr(INVALID_MFN),
>> -                             MATTR_MEM, p2m_invalid);
>> +                             MATTR_MEM, 0, p2m_invalid,
>> +                             d->arch.p2m.default_access);
>>  }
>>
>>  unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
>> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
>> index 9e0419e..f1a087e 100644
>> --- a/xen/include/asm-arm/domain.h
>> +++ b/xen/include/asm-arm/domain.h
>> @@ -17,6 +17,7 @@ struct hvm_domain
>>  {
>>      uint64_t              params[HVM_NR_PARAMS];
>>      struct hvm_iommu      iommu;
>> +    bool_t                introspection_enabled;
>>  }  __cacheline_aligned;
>>
>>  #ifdef CONFIG_ARM_64
>> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
>> index da36504..7583d9b 100644
>> --- a/xen/include/asm-arm/p2m.h
>> +++ b/xen/include/asm-arm/p2m.h
>> @@ -2,8 +2,9 @@
>>  #define _XEN_P2M_H
>>
>>  #include <xen/mm.h>
>> -
>> +#include <xen/radix-tree.h>
>>  #include <xen/p2m-common.h>
>> +#include <public/memory.h>
>>
>>  #define paddr_bits PADDR_BITS
>>
>> @@ -48,6 +49,18 @@ struct p2m_domain {
>>      /* If true, and an access fault comes in and there is no mem_event listener,
>>       * pause domain. Otherwise, remove access restrictions. */
>>      bool_t access_required;
>> +
>> +    /* Defines if mem_access is in use for the domain. */
>> +    bool_t mem_access_enabled;
>> +
>> +    /* Default P2M access type for each page in the domain: new pages,
>> +     * swapped in pages, cleared pages, and pages that are ambiguously
>> +     * retyped get this access type. See definition of p2m_access_t. */
>> +    p2m_access_t default_access;
>> +
>> +    /* Radix tree to store the p2m_access_t settings as the PTEs don't have
>> +     * enough available bits to store this information. */
>> +    struct radix_tree_root mem_access_settings;
>>  };
>>
>>  /* List of possible type for each page in the p2m entry.
>> @@ -217,6 +230,26 @@ static inline int get_page_and_type(struct page_info *page,
>>  /* get host p2m table */
>>  #define p2m_get_hostp2m(d) (&(d)->arch.p2m)
>>
>> +/* mem_event and mem_access are supported on any ARM guest */
>> +static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
>> +{
>> +    return 1;
>> +}
>> +
>> +static inline bool_t p2m_mem_event_sanity_check(struct domain *d)
>> +{
>> +    return 1;
>> +}
>> +
>> +/* Get access type for a pfn
>> + * If pfn == -1ul, gets the default access type */
>> +static inline
>> +int p2m_get_mem_access(struct domain *d, unsigned long pfn,
>> +                       xenmem_access_t *access)
>> +{
>> +    return -ENOSYS;
>> +}
>> +
>>  #endif /* _XEN_P2M_H */
>>
>>  /*
>> diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
>> index fcd26fb..cf7ab7c 100644
>> --- a/xen/include/asm-arm/processor.h
>> +++ b/xen/include/asm-arm/processor.h
>> @@ -441,7 +441,7 @@ union hsr {
>>      struct hsr_dabt {
>>          unsigned long dfsc:6;  /* Data Fault Status Code */
>>          unsigned long write:1; /* Write / not Read */
>> -        unsigned long s1ptw:1; /* */
>> +        unsigned long s1ptw:1; /* Stage 2 fault during stage 1 translation */
>>          unsigned long cache:1; /* Cache Maintenance */
>>          unsigned long eat:1;   /* External Abort Type */
>>  #ifdef CONFIG_ARM_32
>> --
>> 2.1.4
>>
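
For completeness, the radix tree bookkeeping the commit message refers
to is only wired up by a later patch in the series; it boils down to
roughly the following (simplified sketch, using the stock
xen/radix-tree.h helpers):

    /* Record the access type for one pfn; the PTE has no spare bits
     * for it, hence the external tree. */
    rc = radix_tree_insert(&p2m->mem_access_settings, pfn,
                           radix_tree_int_to_ptr(a));
    if ( rc == -EEXIST )
    {
        /* A setting already exists for this pfn, replace it. */
        radix_tree_replace_slot(
            radix_tree_lookup_slot(&p2m->mem_access_settings, pfn),
            radix_tree_int_to_ptr(a));
        rc = 0;
    }

    /* Lookup: a missing entry means the domain default applies. */
    void *ptr = radix_tree_lookup(&p2m->mem_access_settings, pfn);
    p2m_access_t access = ptr ? radix_tree_ptr_to_int(ptr)
                              : p2m->default_access;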

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel