
[Xen-devel] [PATCH v2 2/3] x86/ioreq server: Rename p2m_mmio_write_dm to p2m_ioreq_server



Previously, the p2m type p2m_mmio_write_dm was introduced for write-
protected memory pages whose write operations are supposed to be
forwarded to and emulated by an ioreq server. However, the rangesets
used to track such pages inside an ioreq server can only hold a
limited number of entries, which restricts the number of guest pages
that can be write-protected.
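
To illustrate that limitation, the sketch below shows roughly how a
device model write-protects a single guest page with the existing
interfaces. This is a minimal sketch based on my reading of the
current libxenctrl interface (xc_hvm_set_mem_type() and
xc_hvm_map_io_range_to_ioreq_server()); error handling is omitted and
real device models may differ:

    #include <xenctrl.h>

    /* Sketch (pre-series flow): write-protect one guest page and have
     * its writes emulated by the given ioreq server. */
    static int write_protect_gfn(xc_interface *xch, domid_t domid,
                                 ioservid_t ioservid, uint64_t gfn)
    {
        uint64_t start = gfn << XC_PAGE_SHIFT;
        uint64_t end = start + XC_PAGE_SIZE - 1;
        int rc;

        /* Mark the page read-only so guest writes fault into Xen. */
        rc = xc_hvm_set_mem_type(xch, domid, HVMMEM_mmio_write_dm, gfn, 1);
        if ( rc )
            return rc;

        /* Register the page with the ioreq server's MMIO rangeset so the
         * faulting writes are forwarded to it.  Each write-protected page
         * consumes one rangeset entry, which is the limit noted above. */
        return xc_hvm_map_io_range_to_ioreq_server(xch, domid, ioservid,
                                                   1 /* MMIO */, start, end);
    }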

This patch renames the p2m type p2m_mmio_write_dm to p2m_ioreq_server,
to reflect that pages of this type are claimed by a single ioreq
server rather than being tracked in that server's rangesets. Follow-up
patches will add the related hvmop handling code that maps/unmaps the
p2m_ioreq_server type to/from an ioreq server.
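
To illustrate the intended model (not part of this patch): once the
follow-up hvmop is in place, a device model would claim the type once
and then simply flip pages to HVMMEM_ioreq_server, with no per-page
rangeset bookkeeping. The wrapper name and signature below are
hypothetical, standing in for the interface added by the later
patches:

    #include <xenctrl.h>

    /* Hypothetical wrapper for the hvmop added by later patches in this
     * series; the real name and signature are defined there. */
    extern int xc_hvm_map_mem_type_to_ioreq_server(xc_interface *xch,
                                                   domid_t domid,
                                                   ioservid_t id,
                                                   uint16_t mem_type);

    static int claim_and_protect_gfn(xc_interface *xch, domid_t domid,
                                     ioservid_t ioservid, uint64_t gfn)
    {
        /* Claim the p2m_ioreq_server type for this ioreq server (done
         * once, not per page). */
        int rc = xc_hvm_map_mem_type_to_ioreq_server(xch, domid, ioservid,
                                                     HVMMEM_ioreq_server);
        if ( rc )
            return rc;

        /* Switch the page to the claimed type; its writes are forwarded
         * to the claiming server without any rangeset entry. */
        return xc_hvm_set_mem_type(xch, domid, HVMMEM_ioreq_server, gfn, 1);
    }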

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Signed-off-by: Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c          | 10 +++++-----
 xen/arch/x86/mm/p2m-ept.c       |  2 +-
 xen/arch/x86/mm/p2m-pt.c        |  2 +-
 xen/arch/x86/mm/shadow/multi.c  |  2 +-
 xen/include/asm-x86/p2m.h       |  4 ++--
 xen/include/public/hvm/hvm_op.h |  2 +-
 6 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f700923..bec6a8a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3166,7 +3166,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      */
     if ( (p2mt == p2m_mmio_dm) || 
          (npfec.write_access &&
-          (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) )
+          (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) )
     {
         __put_gfn(p2m, gfn);
         if ( ap2m_active )
@@ -6578,8 +6578,8 @@ static int hvmop_get_mem_type(
     get_gfn_query_unlocked(d, a.pfn, &t);
     if ( p2m_is_mmio(t) )
         a.mem_type =  HVMMEM_mmio_dm;
-    else if ( t == p2m_mmio_write_dm )
-        a.mem_type = HVMMEM_mmio_write_dm;
+    else if ( t == p2m_ioreq_server )
+        a.mem_type = HVMMEM_ioreq_server;
     else if ( p2m_is_readonly(t) )
         a.mem_type =  HVMMEM_ram_ro;
     else if ( p2m_is_ram(t) )
@@ -6614,7 +6614,7 @@ static bool_t hvm_allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
 {
     if ( p2m_is_ram(old) ||
          (p2m_is_hole(old) && new == p2m_mmio_dm) ||
-         (old == p2m_mmio_write_dm && new == p2m_ram_rw) )
+         (old == p2m_ioreq_server && new == p2m_ram_rw) )
         return 1;
 
     return 0;
@@ -6634,7 +6634,7 @@ static int hvmop_set_mem_type(
         [HVMMEM_ram_rw]  = p2m_ram_rw,
         [HVMMEM_ram_ro]  = p2m_ram_ro,
         [HVMMEM_mmio_dm] = p2m_mmio_dm,
-        [HVMMEM_mmio_write_dm] = p2m_mmio_write_dm
+        [HVMMEM_ioreq_server] = p2m_ioreq_server
     };
 
     if ( copy_from_guest(&a, arg, 1) )
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 3cb6868..380ec25 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -171,7 +171,7 @@ static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
             entry->a = entry->d = !!cpu_has_vmx_ept_ad;
             break;
         case p2m_grant_map_ro:
-        case p2m_mmio_write_dm:
+        case p2m_ioreq_server:
             entry->r = 1;
             entry->w = entry->x = 0;
             entry->a = !!cpu_has_vmx_ept_ad;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3d80612..eabd2e3 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -94,7 +94,7 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn,
     default:
         return flags | _PAGE_NX_BIT;
     case p2m_grant_map_ro:
-    case p2m_mmio_write_dm:
+    case p2m_ioreq_server:
         return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
     case p2m_ram_ro:
     case p2m_ram_logdirty:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index e5c8499..c81302a 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3225,7 +3225,7 @@ static int sh_page_fault(struct vcpu *v,
 
     /* Need to hand off device-model MMIO to the device model */
     if ( p2mt == p2m_mmio_dm
-         || (p2mt == p2m_mmio_write_dm && ft == ft_demand_write) )
+         || (p2mt == p2m_ioreq_server && ft == ft_demand_write) )
     {
         gpa = guest_walk_to_gpa(&gw);
         goto mmio;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 5392eb0..ee2ea9c 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -71,7 +71,7 @@ typedef enum {
     p2m_ram_shared = 12,          /* Shared or sharable memory */
     p2m_ram_broken = 13,          /* Broken page, access cause domain crash */
     p2m_map_foreign  = 14,        /* ram pages from foreign domain */
-    p2m_mmio_write_dm = 15,       /* Read-only; writes go to the device model */
+    p2m_ioreq_server = 15,
 } p2m_type_t;
 
 /* Modifiers to the query */
@@ -112,7 +112,7 @@ typedef unsigned int p2m_query_t;
                       | p2m_to_mask(p2m_ram_ro)         \
                       | p2m_to_mask(p2m_grant_map_ro)   \
                       | p2m_to_mask(p2m_ram_shared)     \
-                      | p2m_to_mask(p2m_mmio_write_dm))
+                      | p2m_to_mask(p2m_ioreq_server))
 
 /* Write-discard types, which should discard the write operations */
 #define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro)     \
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 1606185..a1eae52 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -83,7 +83,7 @@ typedef enum {
     HVMMEM_ram_rw,             /* Normal read/write guest RAM */
     HVMMEM_ram_ro,             /* Read-only; writes are discarded */
     HVMMEM_mmio_dm,            /* Reads and write go to the device model */
-    HVMMEM_mmio_write_dm       /* Read-only; writes go to the device model */
+    HVMMEM_ioreq_server,
 } hvmmem_type_t;
 
 /* Following tools-only interfaces may change in future. */
-- 
1.9.1

