
[Xen-devel] [PATCH RFC 03/10] xen/balloon: consolidate data structures



Put the Xen balloon mutex, page list and stats into struct xen_balloon, so
that we can easily back-reference those structures. The page migration
callback introduced in later patch(es) will need to get hold of them.

No functional change is introduced.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 drivers/xen/balloon.c     |  168 ++++++++++++++++++++++++---------------------
 drivers/xen/xen-balloon.c |   24 ++++---
 include/xen/balloon.h     |   15 +++-
 3 files changed, 119 insertions(+), 88 deletions(-)
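
(Not part of the patch itself -- a rough sketch of the kind of page
migration callback a later patch could add, showing how it can now reach
the mutex, the page list and the stats through the single xen_balloon
object. The function name, its signature and the bookkeeping below are
assumptions made purely for illustration; it is written as if it lived in
drivers/xen/balloon.c next to __balloon_append().)

static int xen_balloon_migratepage(struct page *newpage,
                                   struct page *page)
{
        /* Hypothetical sketch, not the real callback.  The old page is
         * assumed to have been taken off the list already by an earlier
         * isolation step; the point here is only that the mutex, the
         * page list and the stats are all reachable through the one
         * exported xen_balloon object.
         */
        mutex_lock(&xen_balloon.balloon_mutex);

        /* Track the replacement page on the shared list; the existing
         * helper keeps balloon_low/balloon_high consistent under the
         * same lock that inflation/deflation already take.
         */
        __balloon_append(newpage);

        mutex_unlock(&xen_balloon.balloon_mutex);

        return 0;       /* success */
}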

diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 30a0baf..d8055f0 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -84,20 +84,13 @@ enum bp_state {
        BP_ECANCELED
 };
 
-
-static DEFINE_MUTEX(balloon_mutex);
-
-struct balloon_stats balloon_stats;
-EXPORT_SYMBOL_GPL(balloon_stats);
+struct xen_balloon xen_balloon;
+EXPORT_SYMBOL_GPL(xen_balloon);
 
 /* We increase/decrease in batches which fit in a page */
 static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
 static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
 
-
-/* List of ballooned pages, threaded through the mem_map array. */
-static LIST_HEAD(ballooned_pages);
-
 /* Main work function, always executed in process context. */
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
@@ -119,11 +112,11 @@ static void __balloon_append(struct page *page)
 {
        /* Lowmem is re-populated first, so highmem pages go at list tail. */
        if (PageHighMem(page)) {
-               list_add_tail(&page->lru, &ballooned_pages);
-               balloon_stats.balloon_high++;
+               list_add_tail(&page->lru, &xen_balloon.ballooned_pages);
+               xen_balloon.balloon_stats.balloon_high++;
        } else {
-               list_add(&page->lru, &ballooned_pages);
-               balloon_stats.balloon_low++;
+               list_add(&page->lru, &xen_balloon.ballooned_pages);
+               xen_balloon.balloon_stats.balloon_low++;
        }
 }
 
@@ -138,19 +131,21 @@ static struct page *balloon_retrieve(bool prefer_highmem)
 {
        struct page *page;
 
-       if (list_empty(&ballooned_pages))
+       if (list_empty(&xen_balloon.ballooned_pages))
                return NULL;
 
        if (prefer_highmem)
-               page = list_entry(ballooned_pages.prev, struct page, lru);
+               page = list_entry(xen_balloon.ballooned_pages.prev,
+                                 struct page, lru);
        else
-               page = list_entry(ballooned_pages.next, struct page, lru);
+               page = list_entry(xen_balloon.ballooned_pages.next,
+                                 struct page, lru);
        list_del(&page->lru);
 
        if (PageHighMem(page))
-               balloon_stats.balloon_high--;
+               xen_balloon.balloon_stats.balloon_high--;
        else
-               balloon_stats.balloon_low--;
+               xen_balloon.balloon_stats.balloon_low--;
 
        adjust_managed_page_count(page, 1);
 
@@ -160,7 +155,7 @@ static struct page *balloon_retrieve(bool prefer_highmem)
 static struct page *balloon_next_page(struct page *page)
 {
        struct list_head *next = page->lru.next;
-       if (next == &ballooned_pages)
+       if (next == &xen_balloon.ballooned_pages)
                return NULL;
        return list_entry(next, struct page, lru);
 }
@@ -168,24 +163,27 @@ static struct page *balloon_next_page(struct page *page)
 static enum bp_state update_schedule(enum bp_state state)
 {
        if (state == BP_DONE) {
-               balloon_stats.schedule_delay = 1;
-               balloon_stats.retry_count = 1;
+               xen_balloon.balloon_stats.schedule_delay = 1;
+               xen_balloon.balloon_stats.retry_count = 1;
                return BP_DONE;
        }
 
-       ++balloon_stats.retry_count;
+       ++xen_balloon.balloon_stats.retry_count;
 
-       if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
-                       balloon_stats.retry_count > balloon_stats.max_retry_count) {
-               balloon_stats.schedule_delay = 1;
-               balloon_stats.retry_count = 1;
+       if (xen_balloon.balloon_stats.max_retry_count != RETRY_UNLIMITED &&
+           xen_balloon.balloon_stats.retry_count >
+           xen_balloon.balloon_stats.max_retry_count) {
+               xen_balloon.balloon_stats.schedule_delay = 1;
+               xen_balloon.balloon_stats.retry_count = 1;
                return BP_ECANCELED;
        }
 
-       balloon_stats.schedule_delay <<= 1;
+       xen_balloon.balloon_stats.schedule_delay <<= 1;
 
-       if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
-               balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
+       if (xen_balloon.balloon_stats.schedule_delay >
+           xen_balloon.balloon_stats.max_schedule_delay)
+               xen_balloon.balloon_stats.schedule_delay =
+                       xen_balloon.balloon_stats.max_schedule_delay;
 
        return BP_EAGAIN;
 }
@@ -193,14 +191,16 @@ static enum bp_state update_schedule(enum bp_state state)
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 static long current_credit(void)
 {
-       return balloon_stats.target_pages - balloon_stats.current_pages -
-               balloon_stats.hotplug_pages;
+       return xen_balloon.balloon_stats.target_pages -
+               xen_balloon.balloon_stats.current_pages -
+               xen_balloon.balloon_stats.hotplug_pages;
 }
 
 static bool balloon_is_inflated(void)
 {
-       if (balloon_stats.balloon_low || balloon_stats.balloon_high ||
-                       balloon_stats.balloon_hotplug)
+       if (xen_balloon.balloon_stats.balloon_low ||
+           xen_balloon.balloon_stats.balloon_high ||
+           xen_balloon.balloon_stats.balloon_hotplug)
                return true;
        else
                return false;
@@ -236,8 +236,8 @@ static enum bp_state reserve_additional_memory(long credit)
 
        balloon_hotplug -= credit;
 
-       balloon_stats.hotplug_pages += credit;
-       balloon_stats.balloon_hotplug = balloon_hotplug;
+       xen_balloon.balloon_stats.hotplug_pages += credit;
+       xen_balloon.balloon_stats.balloon_hotplug = balloon_hotplug;
 
        return BP_DONE;
 }
@@ -246,16 +246,16 @@ static void xen_online_page(struct page *page)
 {
        __online_page_set_limits(page);
 
-       mutex_lock(&balloon_mutex);
+       mutex_lock(&xen_balloon.balloon_mutex);
 
        __balloon_append(page);
 
-       if (balloon_stats.hotplug_pages)
-               --balloon_stats.hotplug_pages;
+       if (xen_balloon.balloon_stats.hotplug_pages)
+               --xen_balloon.balloon_stats.hotplug_pages;
        else
-               --balloon_stats.balloon_hotplug;
+               --xen_balloon.balloon_stats.balloon_hotplug;
 
-       mutex_unlock(&balloon_mutex);
+       mutex_unlock(&xen_balloon.balloon_mutex);
 }
 
 static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
@@ -273,19 +273,20 @@ static struct notifier_block xen_memory_nb = {
 #else
 static long current_credit(void)
 {
-       unsigned long target = balloon_stats.target_pages;
+       unsigned long target = xen_balloon.balloon_stats.target_pages;
 
        target = min(target,
-                    balloon_stats.current_pages +
-                    balloon_stats.balloon_low +
-                    balloon_stats.balloon_high);
+                    xen_balloon.balloon_stats.current_pages +
+                    xen_balloon.balloon_stats.balloon_low +
+                    xen_balloon.balloon_stats.balloon_high);
 
-       return target - balloon_stats.current_pages;
+       return target - xen_balloon.balloon_stats.current_pages;
 }
 
 static bool balloon_is_inflated(void)
 {
-       if (balloon_stats.balloon_low || balloon_stats.balloon_high)
+       if (xen_balloon.balloon_stats.balloon_low ||
+           xen_balloon.balloon_stats.balloon_high)
                return true;
        else
                return false;
@@ -293,7 +294,8 @@ static bool balloon_is_inflated(void)
 
 static enum bp_state reserve_additional_memory(long credit)
 {
-       balloon_stats.target_pages = balloon_stats.current_pages;
+       xen_balloon.balloon_stats.target_pages =
+               xen_balloon.balloon_stats.current_pages;
        return BP_DONE;
 }
 #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
@@ -310,10 +312,12 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
        };
 
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-       if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) {
-               nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);
-               balloon_stats.hotplug_pages += nr_pages;
-               balloon_stats.balloon_hotplug -= nr_pages;
+       if (!xen_balloon.balloon_stats.balloon_low &&
+           !xen_balloon.balloon_stats.balloon_high) {
+               nr_pages = min(nr_pages,
+                              xen_balloon.balloon_stats.balloon_hotplug);
+               xen_balloon.balloon_stats.hotplug_pages += nr_pages;
+               xen_balloon.balloon_stats.balloon_hotplug -= nr_pages;
                return BP_DONE;
        }
 #endif
@@ -321,7 +325,8 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);
 
-       page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
+       page = list_first_entry_or_null(&xen_balloon.ballooned_pages,
+                                       struct page, lru);
        for (i = 0; i < nr_pages; i++) {
                if (!page) {
                        nr_pages = i;
@@ -363,7 +368,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
                __free_reserved_page(page);
        }
 
-       balloon_stats.current_pages += rc;
+       xen_balloon.balloon_stats.current_pages += rc;
 
        return BP_DONE;
 }
@@ -381,10 +386,11 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
        };
 
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-       if (balloon_stats.hotplug_pages) {
-               nr_pages = min(nr_pages, balloon_stats.hotplug_pages);
-               balloon_stats.hotplug_pages -= nr_pages;
-               balloon_stats.balloon_hotplug += nr_pages;
+       if (xen_balloon.balloon_stats.hotplug_pages) {
+               nr_pages = min(nr_pages,
+                              xen_balloon.balloon_stats.hotplug_pages);
+               xen_balloon.balloon_stats.hotplug_pages -= nr_pages;
+               xen_balloon.balloon_stats.balloon_hotplug += nr_pages;
                return BP_DONE;
        }
 #endif
@@ -451,7 +457,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
        ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
        BUG_ON(ret != nr_pages);
 
-       balloon_stats.current_pages -= nr_pages;
+       xen_balloon.balloon_stats.current_pages -= nr_pages;
 
        return state;
 }
@@ -467,7 +473,7 @@ static void balloon_process(struct work_struct *work)
        enum bp_state state = BP_DONE;
        long credit;
 
-       mutex_lock(&balloon_mutex);
+       mutex_lock(&xen_balloon.balloon_mutex);
 
        do {
                credit = current_credit();
@@ -492,9 +498,10 @@ static void balloon_process(struct work_struct *work)
 
        /* Schedule more work if there is some still to be done. */
        if (state == BP_EAGAIN)
-               schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
+               schedule_delayed_work(&balloon_worker,
+                                     xen_balloon.balloon_stats.schedule_delay * HZ);
 
-       mutex_unlock(&balloon_mutex);
+       mutex_unlock(&xen_balloon.balloon_mutex);
 }
 
 struct page *get_balloon_scratch_page(void)
@@ -513,7 +520,7 @@ void put_balloon_scratch_page(void)
 void balloon_set_new_target(unsigned long target)
 {
        /* No need for lock. Not read-modify-write updates. */
-       balloon_stats.target_pages = target;
+       xen_balloon.balloon_stats.target_pages = target;
        schedule_delayed_work(&balloon_worker, 0);
 }
 EXPORT_SYMBOL_GPL(balloon_set_new_target);
@@ -529,7 +536,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
 {
        int pgno = 0;
        struct page *page;
-       mutex_lock(&balloon_mutex);
+       mutex_lock(&xen_balloon.balloon_mutex);
        while (pgno < nr_pages) {
                page = balloon_retrieve(highmem);
                if (page && (highmem || !PageHighMem(page))) {
@@ -544,14 +551,14 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
                                goto out_undo;
                }
        }
-       mutex_unlock(&balloon_mutex);
+       mutex_unlock(&xen_balloon.balloon_mutex);
        return 0;
  out_undo:
        while (pgno)
                balloon_append(pages[--pgno]);
        /* Free the memory back to the kernel soon */
        schedule_delayed_work(&balloon_worker, 0);
-       mutex_unlock(&balloon_mutex);
+       mutex_unlock(&xen_balloon.balloon_mutex);
        return -ENOMEM;
 }
 EXPORT_SYMBOL(alloc_xenballooned_pages);
@@ -566,7 +573,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 {
        int i;
 
-       mutex_lock(&balloon_mutex);
+       mutex_lock(&xen_balloon.balloon_mutex);
 
        for (i = 0; i < nr_pages; i++) {
                if (pages[i])
@@ -577,7 +584,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
        if (current_credit())
                schedule_delayed_work(&balloon_worker, 0);
 
-       mutex_unlock(&balloon_mutex);
+       mutex_unlock(&xen_balloon.balloon_mutex);
 }
 EXPORT_SYMBOL(free_xenballooned_pages);
 
@@ -660,21 +667,28 @@ static int __init balloon_init(void)
 
        pr_info("Initialising balloon driver\n");
 
-       balloon_stats.current_pages = xen_pv_domain()
+       memset(&xen_balloon, 0, sizeof(xen_balloon));
+
+       mutex_init(&xen_balloon.balloon_mutex);
+
+       INIT_LIST_HEAD(&xen_balloon.ballooned_pages);
+
+       xen_balloon.balloon_stats.current_pages = xen_pv_domain()
                ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
                : get_num_physpages();
-       balloon_stats.target_pages  = balloon_stats.current_pages;
-       balloon_stats.balloon_low   = 0;
-       balloon_stats.balloon_high  = 0;
+       xen_balloon.balloon_stats.target_pages  =
+               xen_balloon.balloon_stats.current_pages;
+       xen_balloon.balloon_stats.balloon_low   = 0;
+       xen_balloon.balloon_stats.balloon_high  = 0;
 
-       balloon_stats.schedule_delay = 1;
-       balloon_stats.max_schedule_delay = 32;
-       balloon_stats.retry_count = 1;
-       balloon_stats.max_retry_count = RETRY_UNLIMITED;
+       xen_balloon.balloon_stats.schedule_delay = 1;
+       xen_balloon.balloon_stats.max_schedule_delay = 32;
+       xen_balloon.balloon_stats.retry_count = 1;
+       xen_balloon.balloon_stats.max_retry_count = RETRY_UNLIMITED;
 
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-       balloon_stats.hotplug_pages = 0;
-       balloon_stats.balloon_hotplug = 0;
+       xen_balloon.balloon_stats.hotplug_pages = 0;
+       xen_balloon.balloon_stats.balloon_hotplug = 0;
 
        set_online_page_callback(&xen_online_page);
        register_memory_notifier(&xen_memory_nb);
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e555845..ef04236 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -126,19 +126,23 @@ module_exit(balloon_exit);
        }                                                               \
        static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
 
-BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
-BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
-BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
-
-static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
-static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
-static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
-static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(xen_balloon.balloon_stats.current_pages));
+BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(xen_balloon.balloon_stats.balloon_low));
+BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(xen_balloon.balloon_stats.balloon_high));
+
+static DEVICE_ULONG_ATTR(schedule_delay, 0444,
+                        xen_balloon.balloon_stats.schedule_delay);
+static DEVICE_ULONG_ATTR(max_schedule_delay, 0644,
+                        xen_balloon.balloon_stats.max_schedule_delay);
+static DEVICE_ULONG_ATTR(retry_count, 0444,
+                        xen_balloon.balloon_stats.retry_count);
+static DEVICE_ULONG_ATTR(max_retry_count, 0644,
+                        xen_balloon.balloon_stats.max_retry_count);
 
 static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
                              char *buf)
 {
-       return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
+       return sprintf(buf, "%lu\n", PAGES2KB(xen_balloon.balloon_stats.target_pages));
 }
 
 static ssize_t store_target_kb(struct device *dev,
@@ -167,7 +171,7 @@ static ssize_t show_target(struct device *dev, struct device_attribute *attr,
                              char *buf)
 {
        return sprintf(buf, "%llu\n",
-                      (unsigned long long)balloon_stats.target_pages
+                      (unsigned long long)xen_balloon.balloon_stats.target_pages
                       << PAGE_SHIFT);
 }
 
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index a4c1c6a..1d7efae 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -21,7 +21,20 @@ struct balloon_stats {
 #endif
 };
 
-extern struct balloon_stats balloon_stats;
+struct xen_balloon {
+       /* Mutex to protect xen_balloon across inflation / deflation /
+        * page migration.
+        */
+       struct mutex balloon_mutex;
+
+       /* List of ballooned pages managed by Xen balloon driver */
+       struct list_head ballooned_pages;
+
+       /* Memory statistic */
+       struct balloon_stats balloon_stats;
+};
+
+extern struct xen_balloon xen_balloon;
 
 void balloon_set_new_target(unsigned long target);
 
-- 
1.7.10.4

