
[Xen-devel] [PATCH 2/2] xen-mapcache: pass the right size argument to test_bits



From: Hanweidong <hanweidong@xxxxxxxxxx>

Compute the correct size for test_bits().
qemu_get_ram_ptr() and qemu_safe_ram_ptr() call xen_map_cache() with a
size of 0 when the requested address is in RAM. xen_map_cache() then
passes that size of 0 on to test_bits() to check whether the
corresponding pfn was mapped in the cache, but test_bits() always
returns 1 when size is 0, without testing any bit. In this case
test_bits() should actually check one bit. This patch therefore
introduces __test_bit_size, which is greater than 0 and a multiple of
XC_PAGE_SIZE, so that test_bits() works correctly with
__test_bit_size >> XC_PAGE_SHIFT as its size.
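
For reference, a standalone sketch (not part of the patch) of the
__test_bit_size rounding introduced below; the helper name
round_test_bit_size() and the hard-coded XC_PAGE_SHIFT/XC_PAGE_SIZE
values (12/4096) are assumptions made just for this example:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define XC_PAGE_SHIFT 12
#define XC_PAGE_SIZE  (1UL << XC_PAGE_SHIFT)

/* hypothetical helper mirroring the new code added to xen_map_cache() */
static uint64_t round_test_bit_size(uint64_t phys_addr, uint64_t size)
{
    uint64_t test_bit_size;

    if (size) {
        /* add the offset into the first page, then round up to a page multiple */
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));
        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        /* a zero-sized request still has to test the page containing phys_addr */
        test_bit_size = XC_PAGE_SIZE;
    }
    return test_bit_size;
}

int main(void)
{
    uint64_t addr = 0x10fff;

    /* prints 1: zero-sized request -> exactly one page to test */
    printf("%" PRIu64 "\n", round_test_bit_size(addr, 0) >> XC_PAGE_SHIFT);
    /* prints 2: 0x1001 bytes starting at page offset 0xfff cross into the next page */
    printf("%" PRIu64 "\n", round_test_bit_size(addr, 0x1001) >> XC_PAGE_SHIFT);
    return 0;
}

The rounding ensures that every pfn touched by the request has its
valid_mapping bit checked, while the size == 0 fallback makes the
lookups coming from qemu_get_ram_ptr() test exactly one bit instead of
none.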

Signed-off-by: Zhenguo Wang <wangzhenguo@xxxxxxxxxx>
Signed-off-by: Weidong Han <hanweidong@xxxxxxxxxx>
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen-mapcache.c |   26 ++++++++++++++++++++++----
 1 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/xen-mapcache.c b/xen-mapcache.c
index a80cbdb..5a626cd 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -200,6 +200,7 @@ uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
     hwaddr address_index;
     hwaddr address_offset;
     hwaddr __size = size;
+    hwaddr __test_bit_size = size;
     bool translated = false;
 
 tryagain:
@@ -208,9 +209,23 @@ tryagain:
 
     trace_xen_map_cache(phys_addr);
 
+    /* __test_bit_size is always a multiple of XC_PAGE_SIZE */
+    if (size) {
+        __test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));
+
+        if (__test_bit_size % XC_PAGE_SIZE) {
+            __test_bit_size += XC_PAGE_SIZE - (__test_bit_size % XC_PAGE_SIZE);
+        }
+    } else {
+        __test_bit_size = XC_PAGE_SIZE;
+    }
+
     if (mapcache->last_entry != NULL &&
         mapcache->last_entry->paddr_index == address_index &&
-        !lock && !__size) {
+        !lock && !__size &&
+        test_bits(address_offset >> XC_PAGE_SHIFT,
+                  __test_bit_size >> XC_PAGE_SHIFT,
+                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
         return mapcache->last_entry->vaddr_base + address_offset;
     }
@@ -229,7 +244,8 @@ tryagain:
 
     while (entry && entry->lock && entry->vaddr_base &&
             (entry->paddr_index != address_index || entry->size != __size ||
-             !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+             !test_bits(address_offset >> XC_PAGE_SHIFT,
+                 __test_bit_size >> XC_PAGE_SHIFT,
                  entry->valid_mapping))) {
         pentry = entry;
         entry = entry->next;
@@ -241,13 +257,15 @@ tryagain:
     } else if (!entry->lock) {
         if (!entry->vaddr_base || entry->paddr_index != address_index ||
                 entry->size != __size ||
-                !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+                !test_bits(address_offset >> XC_PAGE_SHIFT,
+                    __test_bit_size >> XC_PAGE_SHIFT,
                     entry->valid_mapping)) {
             xen_remap_bucket(entry, __size, address_index);
         }
     }
 
-    if(!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+    if(!test_bits(address_offset >> XC_PAGE_SHIFT,
+                __test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping)) {
         mapcache->last_entry = NULL;
         if (!translated && mapcache->phys_offset_to_gaddr) {
-- 
1.7.2.5

