|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH RFC 07/10] xen/balloon: factor out some helper functions
They will be used in the page migration routine.
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
drivers/xen/balloon.c | 121 +++++++++++++++++++++++++++----------------------
1 file changed, 68 insertions(+), 53 deletions(-)
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 24efdf6..815e1d5 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -312,6 +312,67 @@ static inline void __reinsert_balloon_pages(struct list_head *head)
spin_unlock_irqrestore(&xen_balloon.xb_dev_info->pages_lock, flags);
}
+static int __memory_op_hypercall(int cmd, xen_pfn_t *list, xen_ulong_t nr)
+{
+ int rc;
+ struct xen_memory_reservation reservation = {
+ .address_bits = 0,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };
+
+ set_xen_guest_handle(reservation.extent_start, list);
+ reservation.nr_extents = nr;
+ rc = HYPERVISOR_memory_op(cmd, &reservation);
+
+ return rc;
+}
+
+static void __link_back_to_pagetable(struct page *page, xen_ulong_t mfn,
+ pte_t pte)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ unsigned long pfn = page_to_pfn(page);
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ set_phys_to_machine(pfn, mfn);
+
+ /* Link back into the page tables if not highmem. */
+ if (!PageHighMem(page)) {
+ int ret;
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ pte, 0);
+ BUG_ON(ret);
+ }
+ }
+#endif
+}
+
+static void __replace_mapping_with_scratch_page(struct page *page)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ /*
+ * Ballooned out frames are effectively replaced with
+ * a scratch frame. Ensure direct mappings and the
+ * p2m are consistent.
+ */
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ unsigned long p, smfn;
+ struct page *scratch_page = get_balloon_scratch_page();
+
+ p = page_to_pfn(scratch_page);
+ smfn = pfn_to_mfn(p);
+
+ __link_back_to_pagetable(page, smfn,
+ mfn_pte(smfn, PAGE_KERNEL_RO));
+
+ put_balloon_scratch_page();
+ }
+#endif
+}
+
+
/* This function will always try to fill in pages managed by Xen
* balloon driver, then pages managed by generic balloon driver.
*/
@@ -322,11 +383,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
struct page *page;
LIST_HEAD(queue);
bool xen_pages;
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
- .extent_order = 0,
- .domid = DOMID_SELF
- };
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
if (!xen_balloon.balloon_stats.balloon_low &&
@@ -365,9 +421,8 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
}
/* Second step: issue hypercall */
- set_xen_guest_handle(reservation.extent_start, frame_list);
- reservation.nr_extents = nr_pages;
- rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+ rc = __memory_op_hypercall(XENMEM_populate_physmap, frame_list,
+ nr_pages);
if (rc <= 0) {
rc = BP_EAGAIN;
goto move_pages_back;
@@ -382,21 +437,8 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
list_del(&page->lru);
pfn = page_to_pfn(page);
-#ifdef CONFIG_XEN_HAVE_PVMMU
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- set_phys_to_machine(pfn, frame_list[i]);
-
- /* Link back into the page tables if not highmem. */
- if (!PageHighMem(page)) {
- int ret;
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- mfn_pte(frame_list[i], PAGE_KERNEL),
- 0);
- BUG_ON(ret);
- }
- }
-#endif
+ __link_back_to_pagetable(page, frame_list[i],
+ mfn_pte(frame_list[i], PAGE_KERNEL));
/* Relinquish the page back to the allocator. */
if (xen_pages)
@@ -439,11 +481,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp,
unsigned long pfn, i;
struct page *page;
int ret;
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
- .extent_order = 0,
- .domid = DOMID_SELF
- };
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
if (xen_balloon.balloon_stats.hotplug_pages) {
@@ -489,36 +526,14 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp,
frame_list[i] = pfn_to_mfn(pfn);
page = pfn_to_page(pfn);
-#ifdef CONFIG_XEN_HAVE_PVMMU
- /*
- * Ballooned out frames are effectively replaced with
- * a scratch frame. Ensure direct mappings and the
- * p2m are consistent.
- */
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- if (!PageHighMem(page)) {
- struct page *scratch_page = get_balloon_scratch_page();
-
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- pfn_pte(page_to_pfn(scratch_page),
- PAGE_KERNEL_RO), 0);
- BUG_ON(ret);
-
- put_balloon_scratch_page();
- }
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
- }
-#endif
-
+ __replace_mapping_with_scratch_page(page);
balloon_append(page, core_driver);
}
flush_tlb_all();
- set_xen_guest_handle(reservation.extent_start, frame_list);
- reservation.nr_extents = nr_pages;
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+ ret = __memory_op_hypercall(XENMEM_decrease_reservation, frame_list,
+ nr_pages);
BUG_ON(ret != nr_pages);
xen_balloon.balloon_stats.current_pages -= nr_pages;
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |