[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 3 of 9] xenpaging: reduce number of qemu cache flushes


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Olaf Hering <olaf@xxxxxxxxx>
  • Date: Mon, 20 Feb 2012 21:48:05 +0100
  • Delivery-date: Mon, 20 Feb 2012 20:48:44 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1329769124 -3600
# Node ID 41d398992bc05ec5c3d9c7e10f5d5156d8794725
# Parent  9c678c4e4658f17325e4549cb152d610f85efad4
xenpaging: reduce number of qemu cache flushes

Currently the command to flush the qemu cache is called a lot if there
are no more pages to evict. This causes churn in the logfiles, and qemu
cannot release any more pages anyway since the last command was issued.

Fix this by remembering the current number of paged-out gfns; if this
number did not change since the last flush command, then sending another
flush command will not free any more gfns.

Remove return code from xenpaging_mem_paging_flush_ioemu_cache() since
errors do not matter, and will be handled elsewhere. Also failure to
send the flush command is not fatal.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

diff -r 9c678c4e4658 -r 41d398992bc0 tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -61,18 +61,15 @@ static void close_handler(int sig)
     unlink_pagefile();
 }
 
-static int xenpaging_mem_paging_flush_ioemu_cache(struct xenpaging *paging)
+static void xenpaging_mem_paging_flush_ioemu_cache(struct xenpaging *paging)
 {
     struct xs_handle *xsh = paging->xs_handle;
     domid_t domain_id = paging->mem_event.domain_id;
     char path[80];
-    bool rc;
 
     sprintf(path, "/local/domain/0/device-model/%u/command", domain_id);
 
-    rc = xs_write(xsh, XBT_NULL, path, "flush-cache", strlen("flush-cache")); 
-
-    return rc == true ? 0 : -1;
+    xs_write(xsh, XBT_NULL, path, "flush-cache", strlen("flush-cache")); 
 }
 
 static int xenpaging_wait_for_event_or_timeout(struct xenpaging *paging)
@@ -728,7 +725,7 @@ static int evict_victim(struct xenpaging
 {
     xc_interface *xch = paging->xc_handle;
     unsigned long gfn;
-    int j = 0;
+    static int num_paged_out;
     int ret;
 
     do
@@ -736,6 +733,17 @@ static int evict_victim(struct xenpaging
         gfn = policy_choose_victim(paging);
         if ( gfn == INVALID_MFN )
         {
+            /* If the number did not change after last flush command then
+             * the command did not reach qemu yet, or qemu still processes
+             * the command, or qemu has nothing to release.
+             * Right now there is no need to issue the command again.
+             */
+            if ( num_paged_out != paging->num_paged_out )
+            {
+                DPRINTF("Flushing qemu cache\n");
+                xenpaging_mem_paging_flush_ioemu_cache(paging);
+                num_paged_out = paging->num_paged_out;
+            }
             ret = -ENOSPC;
             goto out;
         }
@@ -748,12 +756,6 @@ static int evict_victim(struct xenpaging
         ret = xc_mem_paging_nominate(xch, paging->mem_event.domain_id, gfn);
         if ( ret == 0 )
             ret = xenpaging_evict_page(paging, gfn, fd, slot);
-        else
-        {
-            if ( j++ % 1000 == 0 )
-                if ( xenpaging_mem_paging_flush_ioemu_cache(paging) )
-                    PERROR("Error flushing ioemu cache");
-        }
     }
     while ( ret );
 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.