[Xen-devel] [PATCH 2 of 3 v3] xen, pod: Zero-check recently populated pages (checklast)
# HG changeset patch
# User George Dunlap <george.dunlap@xxxxxxxxxxxxx>
# Date 1340893083 -3600
# Node ID 9de241075c7f622758f00223805b0279635ff4d9
# Parent fb0187ae8a20d0850dea0cd3e4167503411e5950
xen,pod: Zero-check recently populated pages (checklast)
When demand-populating pages due to guest accesses, check recently populated
pages to see if we can reclaim them for the cache. This should keep the PoD
cache filled when the start-of-day scrubber is going through.
The number 128 was chosen by experiment. Windows does its page
scrubbing in parallel; while a small number like 4 works well for
single VMs, it breaks down as multiple vcpus are scrubbing different
pages in parallel. Increasing to 128 works well for higher numbers of
vcpus.
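
The history buffer itself is just a fixed-size ring: recording a newly
populated gfn evicts the entry written POD_HISTORY_MAX populations
earlier, and that evicted gfn is the one that gets zero-checked. A
minimal standalone model of the idea (illustrative names, not the
actual Xen identifiers):

    #define HISTORY_MAX 128

    static unsigned long history[HISTORY_MAX];
    static unsigned int history_index;

    /* Record a newly populated gfn; return the gfn it evicts, i.e. the
     * one recorded HISTORY_MAX populations ago, which is now the best
     * candidate for a zero-check. */
    static unsigned long record_populated(unsigned long gfn)
    {
        unsigned long evicted = history[history_index];

        history[history_index] = gfn;
        history_index = (history_index + 1) % HISTORY_MAX;
        return evicted;
    }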
v2:
- Wrapped some long lines
- unsigned int for index, unsigned long for array
v3:
- Use PAGE_ORDER_2M instead of 9
- Removed inappropriate use of p2m_pod_zero_check_superpage() return value
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -926,6 +926,27 @@ p2m_pod_emergency_sweep_super(struct p2m
     p2m->pod.reclaim_super = i ? i - SUPERPAGE_PAGES : 0;
 }
 
+/* When populating a new superpage, look at recently populated superpages
+ * hoping that they've been zeroed. This will snap up zeroed pages as soon as
+ * the guest OS is done with them. */
+static void
+p2m_pod_check_last_super(struct p2m_domain *p2m, unsigned long gfn_aligned)
+{
+    unsigned long check_gfn;
+
+    ASSERT(p2m->pod.last_populated_index < POD_HISTORY_MAX);
+
+    check_gfn = p2m->pod.last_populated[p2m->pod.last_populated_index];
+
+    p2m->pod.last_populated[p2m->pod.last_populated_index] = gfn_aligned;
+
+    p2m->pod.last_populated_index =
+        ( p2m->pod.last_populated_index + 1 ) % POD_HISTORY_MAX;
+
+    p2m_pod_zero_check_superpage(p2m, check_gfn);
+}
+
+
 #define POD_SWEEP_STRIDE  16
 static void
 p2m_pod_emergency_sweep(struct p2m_domain *p2m)
@@ -1083,6 +1104,12 @@ p2m_pod_demand_populate(struct p2m_domai
         __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), &t);
     }
 
+    /* Check the last guest demand-populate */
+    if ( p2m->pod.entry_count > p2m->pod.count
+         && (order == PAGE_ORDER_2M)
+         && (q & P2M_ALLOC) )
+        p2m_pod_check_last_super(p2m, gfn_aligned);
+
     pod_unlock(p2m);
     return 0;
 out_of_memory:
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -287,6 +287,10 @@ struct p2m_domain {
         unsigned reclaim_super;  /* Last gpfn of a scan */
         unsigned reclaim_single; /* Last gpfn of a scan */
         unsigned max_guest;      /* gpfn of max guest demand-populate */
+#define POD_HISTORY_MAX 128
+        /* gpfn of last guest superpage demand-populated */
+        unsigned long last_populated[POD_HISTORY_MAX];
+        unsigned int last_populated_index;
         mm_lock_t lock;          /* Locking of private pod structs,   *
                                   * not relying on the p2m lock.      */
     } pod;
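
For illustration only (not part of the patch): a toy driver for the
ring-buffer model sketched above the changelog. It shows that each gfn
comes back for a zero-check exactly HISTORY_MAX populations after it
was recorded -- the lag that gives a parallel scrubber time to finish
with the page before we look at it:

    #include <stdio.h>

    int main(void)
    {
        unsigned long gfn;

        for ( gfn = 0x1000; gfn < 0x1000 + 2 * HISTORY_MAX; gfn++ )
        {
            unsigned long check = record_populated(gfn);

            if ( check )    /* 0 while the ring is still filling */
                printf("populated %#lx, zero-check %#lx\n", gfn, check);
        }
        return 0;
    }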
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel