
[Xen-devel] [PATCH RFC 9/9] mm: mark pages that have their permissions controlled by a domain


  • To: xen-devel@xxxxxxxxxxxxx
  • From: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
  • Date: Wed, 2 Jul 2014 16:34:01 +0300
  • Cc: Mihai Dontu <mdontu@xxxxxxxxxxxxxxx>, tim@xxxxxxx, Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
  • Delivery-date: Wed, 02 Jul 2014 13:34:30 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

This goes together with the mem-event API changes and marks certain
pages as having their permissions controlled from outside the
hypervisor (by a user domain, in our case). The mark prevents Xen from
resetting the permissions on such pages in certain cases, enforcing the
previously expressed intention of receiving a memory event every time
the owning domain triggers a fault on them.
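
In practice this means the page-fault handler first asks the p2m layer
whether the faulting entry is mem-event controlled and only then does
its usual cleanup. Condensed from the hvm.c hunk below (a sketch, not
standalone code):

    bool_t mem_ev = 0;

    mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
                              P2M_ALLOC | (access_w ? P2M_UNSHARE : 0),
                              NULL, &mem_ev);

    /* Do not silently restore write access behind the listener's back. */
    if ( access_w && !mem_ev )
    {
        paging_mark_dirty(v->domain, mfn_x(mfn));
        p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
    }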

The implementation uses a previously unused software-available bit
(bit 63, avail3) of the EPT PTE (VMX only) and extends the definitions
of get_entry() and set_entry() with a parameter that controls this bit.
It would probably have been better to add a new access type to
p2m_access_t (which I tried), but in testing that caused subtle
failures in the application using the mem-event API (the domains
themselves seemed to work just fine, though).
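
Concretely, the two p2m hooks grow one extra parameter (condensed from
the p2m.h hunk below). Callers that merely read or rebuild entries pass
0/NULL; only p2m_set_mem_access() passes 1, which lands in bit 63 of
the EPT PTE in place of avail3:

    int   (*set_entry)(struct p2m_domain *p2m, unsigned long gfn,
                       mfn_t mfn, unsigned int page_order,
                       p2m_type_t p2mt, p2m_access_t p2ma, bool_t mem_ev);
    mfn_t (*get_entry)(struct p2m_domain *p2m, unsigned long gfn,
                       p2m_type_t *p2mt, p2m_access_t *p2ma,
                       p2m_query_t q, unsigned int *page_order,
                       bool_t *mem_ev);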

Signed-off-by: Mihai Dontu <mdontu@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            |    5 +--
 xen/arch/x86/hvm/svm/svm.c        |    4 +--
 xen/arch/x86/hvm/vmx/vmx.c        |    2 +-
 xen/arch/x86/mm/hap/nested_hap.c  |    2 +-
 xen/arch/x86/mm/mem_access.c      |    2 +-
 xen/arch/x86/mm/mem_sharing.c     |    4 +--
 xen/arch/x86/mm/p2m-ept.c         |   13 +++++---
 xen/arch/x86/mm/p2m-pod.c         |   12 +++----
 xen/arch/x86/mm/p2m-pt.c          |    7 +++--
 xen/arch/x86/mm/p2m.c             |   63 +++++++++++++++++++------------------
 xen/include/asm-x86/hvm/vmx/vmx.h |    5 ++-
 xen/include/asm-x86/p2m.h         |   21 +++++++------
 12 files changed, 77 insertions(+), 63 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index df696d1..952aa06 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2628,6 +2628,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
     int rc, fall_through = 0, paged = 0;
     int sharing_enomem = 0;
     mem_event_request_t *req_ptr = NULL;
+    bool_t mem_ev = 0;
 
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
@@ -2683,7 +2684,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
 
     p2m = p2m_get_hostp2m(v->domain);
     mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 
-                              P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL);
+                              P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL, &mem_ev);
 
     /* Check access permissions first, then handle faults */
     if ( mfn_x(mfn) != INVALID_MFN )
@@ -2775,7 +2776,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
          * a large page, we do not change other pages type within that large
          * page.
          */
-        if ( access_w )
+        if ( access_w && !mem_ev )
         {
             paging_mark_dirty(v->domain, mfn_x(mfn));
             p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 76616ac..55b2000 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1420,7 +1420,7 @@ static void svm_do_nested_pgfault(struct vcpu *v,
         p2m = p2m_get_p2m(v);
         _d.gpa = gpa;
         _d.qualification = 0;
-        mfn = __get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL, 0);
+        mfn = __get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL, 0, NULL);
         _d.mfn = mfn_x(mfn);
         
         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
@@ -1441,7 +1441,7 @@ static void svm_do_nested_pgfault(struct vcpu *v,
     if ( p2m == NULL )
         p2m = p2m_get_p2m(v);
     /* Everything else is an error. */
-    mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0);
+    mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0, NULL);
     gdprintk(XENLOG_ERR,
          "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
          gpa, mfn_x(mfn), p2mt);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 4976215..3ed5ab3 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2580,7 +2580,7 @@ static int vmx_stop_reexecute_instruction(struct vcpu *v)
     for ( i = v->rexec_level - 1; i >= 0; i-- )
     {
         if ( 0 != p2m_set_mem_access(v->domain, v->rexec_context[i].gpa >> PAGE_SHIFT,
-                                     1, 0, 0xff, v->rexec_context[i].old_access) )
+                                     1, 0, 0xff, v->rexec_context[i].old_access, 0) )
         {
             ret = -1;
             return ret;
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index 137a87c..9b1b973 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -169,7 +169,7 @@ nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
 
     /* walk L0 P2M table */
     mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, p2mt, p2ma,
-                              0, page_order);
+                              0, page_order, NULL);
 
     rc = NESTEDHVM_PAGEFAULT_DIRECT_MMIO;
     if ( *p2mt == p2m_mmio_direct )
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index e8465a5..4f4eaed 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -74,7 +74,7 @@ int mem_access_memop(unsigned long cmd,
             break;
 
         rc = p2m_set_mem_access(d, mao.pfn, mao.nr, start_iter,
-                                MEMOP_CMD_MASK, mao.access);
+                                MEMOP_CMD_MASK, mao.access, 1);
         if ( rc > 0 )
         {
             ASSERT(!(rc & MEMOP_CMD_MASK));
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 7293f31..a0757ec 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1274,7 +1274,7 @@ int relinquish_shared_pages(struct domain *d)
 
         if ( atomic_read(&d->shr_pages) == 0 )
             break;
-        mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
+        mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
         if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
         {
             /* Does not fail with ENOMEM given the DESTROY flag */
@@ -1284,7 +1284,7 @@ int relinquish_shared_pages(struct domain *d)
              * unshare.  Must succeed: we just read the old entry and
              * we hold the p2m lock. */
             set_rc = p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
-                                    p2m_invalid, p2m_access_rwx);
+                                    p2m_invalid, p2m_access_rwx, 0);
             ASSERT(set_rc == 0);
             count += 0x10;
         }
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 15c6e83..03c90e7 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -244,7 +244,7 @@ static int ept_split_super_page(struct p2m_domain *p2m, ept_entry_t *ept_entry,
         epte->mfn += i * trunk;
         epte->snp = (iommu_enabled && iommu_snoop);
         ASSERT(!epte->rsvd1);
-        ASSERT(!epte->avail3);
+        /* ASSERT(!epte->avail3); */
 
         ept_p2m_type_to_flags(epte, epte->sa_p2mt, epte->access);
 
@@ -608,7 +608,7 @@ bool_t ept_handle_misconfig(uint64_t gpa)
  */
 static int
 ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
-              unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma)
+              unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma, bool_t mem_ev)
 {
     ept_entry_t *table, *ept_entry = NULL;
     unsigned long gfn_remainder = gfn;
@@ -743,6 +743,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         new_entry.sa_p2mt = p2mt;
         new_entry.access = p2ma;
         new_entry.snp = (iommu_enabled && iommu_snoop);
+        new_entry.mem_ev = mem_ev;
 
         /* the caller should take care of the previous page */
         new_entry.mfn = mfn_x(mfn);
@@ -799,8 +800,8 @@ out:
 
 /* Read ept p2m entries */
 static mfn_t ept_get_entry(struct p2m_domain *p2m,
-                           unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
-                           p2m_query_t q, unsigned int *page_order)
+                           unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
+                           p2m_query_t q, unsigned int *page_order, bool_t *mem_ev)
 {
     ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
     unsigned long gfn_remainder = gfn;
@@ -814,6 +815,8 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m,
 
     *t = p2m_mmio_dm;
     *a = p2m_access_n;
+    if ( mem_ev )
+        *mem_ev = 0;
 
     /* This pfn is higher than the highest the p2m map currently holds */
     if ( gfn > p2m->max_mapped_pfn )
@@ -879,6 +882,8 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m,
         else
             *t = ept_entry->sa_p2mt;
         *a = ept_entry->access;
+        if ( mem_ev )
+            *mem_ev = ept_entry->mem_ev;
 
         mfn = _mfn(ept_entry->mfn);
         if ( i )
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index bd4c7c8..289a377 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -536,7 +536,7 @@ recount:
         p2m_access_t a;
         p2m_type_t t;
 
-        (void)p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL);
+        (void)p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL, NULL);
 
         if ( t == p2m_populate_on_demand )
             pod++;
@@ -587,7 +587,7 @@ recount:
         p2m_type_t t;
         p2m_access_t a;
 
-        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL);
+        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL, NULL);
         if ( t == p2m_populate_on_demand )
         {
             p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
@@ -676,7 +676,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     for ( i=0; i<SUPERPAGE_PAGES; i++ )
     {
         p2m_access_t a; 
-        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL);
+        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL, NULL);
 
         if ( i == 0 )
         {
@@ -808,7 +808,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
     for ( i=0; i<count; i++ )
     {
         p2m_access_t a;
-        mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, 0, NULL);
+        mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, 0, NULL, NULL);
         /* If this is ram, and not a pagetable or from the xen heap, and probably not mapped
            elsewhere, map it; otherwise, skip. */
         if ( p2m_is_ram(types[i])
@@ -947,7 +947,7 @@ p2m_pod_emergency_sweep(struct p2m_domain *p2m)
     for ( i=p2m->pod.reclaim_single; i > 0 ; i-- )
     {
         p2m_access_t a;
-        (void)p2m->get_entry(p2m, i, &t, &a, 0, NULL);
+        (void)p2m->get_entry(p2m, i, &t, &a, 0, NULL, NULL);
         if ( p2m_is_ram(t) )
         {
             gfns[j] = i;
@@ -1135,7 +1135,7 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
     for ( i = 0; i < (1UL << order); i++ )
     {
         p2m_access_t a;
-        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
+        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL, NULL);
         if ( p2m_is_ram(ot) )
         {
             P2M_DEBUG("gfn_to_mfn returned type %d!\n", ot);
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 085ab6f..4242b3b 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -481,7 +481,8 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa)
 /* Returns: 0 for success, -errno for failure */
 static int
 p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-                 unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
+                 unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma,
+                 bool_t mem_ev)
 {
     /* XXX -- this might be able to be faster iff current->domain == d */
     void *table;
@@ -688,7 +689,7 @@ static inline p2m_type_t recalc_type(bool_t recalc, p2m_type_t t,
 static mfn_t
 p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
                  p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
-                 unsigned int *page_order)
+                 unsigned int *page_order, bool_t *mem_ev)
 {
     mfn_t mfn;
     paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
@@ -707,6 +708,8 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
     *t = p2m_mmio_dm;
     /* Not implemented except with EPT */
     *a = p2m_access_rwx; 
+    if ( mem_ev )
+        *mem_ev = 0;
 
     if ( gfn > p2m->max_mapped_pfn )
         /* This pfn is higher than the highest the p2m map currently holds */
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index ff67b09..41fd120 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -240,7 +240,7 @@ void p2m_memory_type_changed(struct domain *d)
 
 mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
                     p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
-                    unsigned int *page_order, bool_t locked)
+                    unsigned int *page_order, bool_t locked, bool_t *mem_ev)
 {
     mfn_t mfn;
 
@@ -260,7 +260,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
         /* Grab the lock here, don't release until put_gfn */
         gfn_lock(p2m, gfn, 0);
 
-    mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
+    mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, mem_ev);
 
     if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
     {
@@ -269,7 +269,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
          * sleeping. */
         if ( mem_sharing_unshare_page(p2m->domain, gfn, 0) < 0 )
             (void)mem_sharing_notify_enomem(p2m->domain, gfn, 0);
-        mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
+        mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, mem_ev);
     }
 
     if (unlikely((p2m_is_broken(*t))))
@@ -312,7 +312,7 @@ struct page_info *get_page_from_gfn_p2m(
     {
         /* Fast path: look up and get out */
         p2m_read_lock(p2m);
-        mfn = __get_gfn_type_access(p2m, gfn, t, a, 0, NULL, 0);
+        mfn = __get_gfn_type_access(p2m, gfn, t, a, 0, NULL, 0, NULL);
         if ( p2m_is_any_ram(*t) && mfn_valid(mfn)
              && !((q & P2M_UNSHARE) && p2m_is_shared(*t)) )
         {
@@ -340,7 +340,7 @@ struct page_info *get_page_from_gfn_p2m(
     }
 
     /* Slow path: take the write lock and do fixups */
-    mfn = get_gfn_type_access(p2m, gfn, t, a, q, NULL);
+    mfn = get_gfn_type_access(p2m, gfn, t, a, q, NULL, NULL);
     if ( p2m_is_ram(*t) && mfn_valid(mfn) )
     {
         page = mfn_to_page(mfn);
@@ -373,7 +373,7 @@ int p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         else
             order = 0;
 
-        set_rc = p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma);
+        set_rc = p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma, 0);
         if ( set_rc )
             rc = set_rc;
 
@@ -537,7 +537,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
     {
         for ( i = 0; i < (1UL << page_order); i++ )
         {
-            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
+            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL, NULL);
             if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
                 set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
@@ -600,7 +600,7 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
     /* First, remove m->p mappings for existing p->m mappings */
     for ( i = 0; i < (1UL << page_order); i++ )
     {
-        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
+        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL, NULL);
         if ( p2m_is_shared(ot) )
         {
             /* Do an unshare to cleanly take care of all corner 
@@ -624,7 +624,7 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                 (void)mem_sharing_notify_enomem(p2m->domain, gfn + i, 0);
                 return rc;
             }
-            omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
+            omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL, NULL);
             ASSERT(!p2m_is_shared(ot));
         }
         if ( p2m_is_grant(ot) || p2m_is_foreign(ot) )
@@ -672,7 +672,7 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
              * address */
             P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                       mfn + i, ogfn, gfn + i);
-            omfn = p2m->get_entry(p2m, ogfn, &ot, &a, 0, NULL);
+            omfn = p2m->get_entry(p2m, ogfn, &ot, &a, 0, NULL, NULL);
             if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
             {
                 ASSERT(mfn_valid(omfn));
@@ -739,7 +739,7 @@ int p2m_change_type_one(struct domain *d, unsigned long gfn,
 
     gfn_lock(p2m, gfn, 0);
 
-    mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL);
+    mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL, NULL);
     rc = likely(pt == ot)
          ? p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt,
                          p2m->default_access)
@@ -823,7 +823,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
         return -EIO;
 
     gfn_lock(p2m, gfn, 0);
-    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
+    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL, NULL);
     if ( p2m_is_grant(ot) || p2m_is_foreign(ot) )
     {
         p2m_unlock(p2m);
@@ -872,7 +872,7 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
         return -EIO;
 
     gfn_lock(p2m, gfn, 0);
-    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
+    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
 
     /* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
     if ( (INVALID_MFN == mfn_x(mfn)) || (t != p2m_mmio_direct) )
@@ -904,7 +904,7 @@ int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
         return -EIO;
 
     gfn_lock(p2m, gfn, 0);
-    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
+    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL, NULL);
     /* At the moment we only allow p2m change if gfn has already been made
      * sharable first */
     ASSERT(p2m_is_shared(ot));
@@ -956,7 +956,7 @@ int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn)
 
     gfn_lock(p2m, gfn, 0);
 
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
 
     /* Check if mfn is valid */
     if ( !mfn_valid(mfn) )
@@ -1018,7 +1018,7 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
     gfn_lock(p2m, gfn, 0);
 
     /* Get mfn */
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
     if ( unlikely(!mfn_valid(mfn)) )
         goto out;
 
@@ -1144,7 +1144,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
 
     /* Fix p2m mapping */
     gfn_lock(p2m, gfn, 0);
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
     /* Allow only nominated or evicted pages to enter page-in path */
     if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
     {
@@ -1206,7 +1206,7 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer)
 
     gfn_lock(p2m, gfn, 0);
 
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
 
     ret = -ENOENT;
     /* Allow missing pages */
@@ -1297,7 +1297,7 @@ void p2m_mem_paging_resume(struct domain *d)
         if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
         {
             gfn_lock(p2m, rsp.gfn, 0);
-            mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, 0, NULL);
+            mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, 0, NULL, NULL);
             /* Allow only pages which were prepared properly, or pages which
              * were nominated but not evicted */
             if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )
@@ -1553,7 +1553,7 @@ release_and_exit:
 
     /* Apply the changes inside the EPT. */
     if ( 0 != p2m_set_mem_access(v->domain, gpa >> PAGE_SHIFT,
-                                 1, 0, 0xff, new_access) )
+                                 1, 0, 0xff, new_access, 0) )
         return -1;
 
     /* Step 6: Reconfigure the VMCS, so it suits our needs. We want a
@@ -1592,11 +1592,11 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
      * These calls to p2m->set_entry() must succeed: we have the gfn
      * locked and just did a successful get_entry(). */
     gfn_lock(p2m, gfn, 0);
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
 
     if ( access_w && p2ma == p2m_access_rx2rw ) 
     {
-        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
+        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, 0);
         ASSERT(rc == 0);
         gfn_unlock(p2m, gfn, 0);
         return 1;
@@ -1605,7 +1605,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
     {
         ASSERT(access_w || access_r || access_x);
         rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
-                            p2mt, p2m_access_rwx);
+                            p2mt, p2m_access_rwx, 0);
         ASSERT(rc == 0);
     }
     gfn_unlock(p2m, gfn, 0);
@@ -1625,14 +1625,14 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
         else
         {
             gfn_lock(p2m, gfn, 0);
-            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
+            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
             if ( p2ma != p2m_access_n2rwx )
             {
                 /* A listener is not required, so clear the access
                  * restrictions.  This set must succeed: we have the
                  * gfn locked and just did a successful get_entry(). */
                 rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
-                                    p2mt, p2m_access_rwx);
+                                    p2mt, p2m_access_rwx, 0);
                 ASSERT(rc == 0);
             }
             gfn_unlock(p2m, gfn, 0);
@@ -1797,7 +1797,8 @@ void p2m_mem_access_resume(struct domain *d)
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
 long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, xenmem_access_t access)
+                        uint32_t start, uint32_t mask, xenmem_access_t access,
+                        bool_t mem_ev)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_access_t a, _a;
@@ -1842,8 +1843,8 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
     p2m_lock(p2m);
     for ( pfn += start; nr > start; ++pfn )
     {
-        mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL);
-        rc = p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a);
+        mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL, NULL);
+        rc = p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a, mem_ev);
         if ( rc )
             break;
 
@@ -1891,12 +1892,12 @@ int p2m_get_mem_access(struct domain *d, unsigned long pfn,
     }
 
     gfn_lock(p2m, gfn, 0);
-    mfn = p2m->get_entry(p2m, pfn, &t, &a, 0, NULL);
+    mfn = p2m->get_entry(p2m, pfn, &t, &a, 0, NULL, NULL);
     gfn_unlock(p2m, gfn, 0);
 
     if ( mfn_x(mfn) == INVALID_MFN )
         return -ESRCH;
-    
+
     if ( (unsigned) a >= ARRAY_SIZE(memaccess) )
         return -ERANGE;
 
@@ -2130,7 +2131,7 @@ void audit_p2m(struct domain *d,
             continue;
         }
 
-        p2mfn = get_gfn_type_access(p2m, gfn, &type, &p2ma, 0, NULL);
+        p2mfn = get_gfn_type_access(p2m, gfn, &type, &p2ma, 0, NULL, NULL);
         if ( mfn_x(p2mfn) != mfn )
         {
             mpbad++;
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index c8bb548..20270e8 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -46,7 +46,10 @@ typedef union {
         access      :   4,  /* bits 61:58 - p2m_access_t */
         tm          :   1,  /* bit 62 - VT-d transient-mapping hint in
                                shared EPT/VT-d usage */
-        avail3      :   1;  /* bit 63 - Software available 3 */
+        mem_ev      :   1;  /* bit 63 - Used to determine if the permissions
+                               on this page have been set via the mem-events
+                               API. If yes, then we should not ever reset it
+                               when handling a page fault */
     };
     u64 epte;
 } ept_entry_t;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 0ddbadb..86614d3 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -239,13 +239,13 @@ struct p2m_domain {
                                        unsigned long gfn,
                                        mfn_t mfn, unsigned int page_order,
                                        p2m_type_t p2mt,
-                                       p2m_access_t p2ma);
+                                       p2m_access_t p2ma, bool_t mem_ev);
     mfn_t              (*get_entry   )(struct p2m_domain *p2m,
                                        unsigned long gfn,
                                        p2m_type_t *p2mt,
                                        p2m_access_t *p2ma,
                                        p2m_query_t q,
-                                       unsigned int *page_order);
+                                       unsigned int *page_order, bool_t *mem_ev);
     void               (*change_entry_type_global)(struct p2m_domain *p2m,
                                                    p2m_type_t ot,
                                                    p2m_type_t nt);
@@ -328,7 +328,7 @@ struct p2m_domain *p2m_get_p2m(struct vcpu *v);
 
 mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
                     p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
-                    unsigned int *page_order, bool_t locked);
+                    unsigned int *page_order, bool_t locked, bool_t *mem_ev);
 
 /* Read a particular P2M table, mapping pages as we go.  Most callers
  * should _not_ call this directly; use the other get_gfn* functions
@@ -337,8 +337,8 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
  * If the lookup succeeds, the return value is != INVALID_MFN and 
  * *page_order is filled in with the order of the superpage (if any) that
  * the entry was found in.  */
-#define get_gfn_type_access(p, g, t, a, q, o)   \
-        __get_gfn_type_access((p), (g), (t), (a), (q), (o), 1)
+#define get_gfn_type_access(p, g, t, a, q, o, m)   \
+        __get_gfn_type_access((p), (g), (t), (a), (q), (o), 1, m)
 
 /* General conversion function from gfn to mfn */
 static inline mfn_t get_gfn_type(struct domain *d,
@@ -346,7 +346,7 @@ static inline mfn_t get_gfn_type(struct domain *d,
                                     p2m_query_t q)
 {
     p2m_access_t a;
-    return get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, q, NULL);
+    return get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, q, NULL, NULL);
 }
 
 /* Syntactic sugar: most callers will use one of these. 
@@ -382,7 +382,7 @@ static inline mfn_t get_gfn_query_unlocked(struct domain *d,
                                            p2m_type_t *t)
 {
     p2m_access_t a;
-    return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0);
+    return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0, NULL);
 }
 
 /* Atomically look up a GFN and take a reference count on the backing page.
@@ -464,9 +464,9 @@ do {                                                    \
 
     /* Now do the gets */
     *first_mfn  = get_gfn_type_access(p2m_get_hostp2m(rval->first_domain), 
-                                      rval->first_gfn, first_t, first_a, q, NULL);
+                                      rval->first_gfn, first_t, first_a, q, NULL, NULL);
     *second_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->second_domain), 
-                                      rval->second_gfn, second_t, second_a, q, NULL);
+                                      rval->second_gfn, second_t, second_a, q, NULL, NULL);
 }
 
 static inline void put_two_gfns(struct two_gfns *arg)
@@ -606,7 +606,8 @@ void p2m_mem_access_resume(struct domain *d);
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
 long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, xenmem_access_t access);
+                        uint32_t start, uint32_t mask, xenmem_access_t access,
+                        bool_t mem_ev);
 
 /* Get access type for a pfn
  * If pfn == -1ul, gets the default access type */
-- 
1.7.9.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

