
[Xen-devel] [PATCH 06/14] xen/grant-table: add helpers for allocating pages



Add gnttab_alloc_pages() and gnttab_free_pages() to allocate/free pages
suitable for grant mapping.

Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
 drivers/block/xen-blkback/blkback.c |    8 ++++----
 drivers/net/xen-netback/interface.c |    7 +++----
 drivers/xen/gntdev.c                |    4 ++--
 drivers/xen/grant-table.c           |   27 +++++++++++++++++++++++++++
 drivers/xen/xen-scsiback.c          |    6 +++---
 include/xen/grant_table.h           |    3 +++
 6 files changed, 42 insertions(+), 13 deletions(-)
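
For reviewers, a minimal usage sketch (not part of the patch) of the new
helpers in place of the open-coded alloc_xenballooned_pages() /
free_xenballooned_pages() calls. The my_backend_* functions and the
MY_NR_PAGES pool size are made up purely for illustration.

#include <xen/grant_table.h>

#define MY_NR_PAGES 16			/* arbitrary pool size */

static struct page *my_pages[MY_NR_PAGES];

static int my_backend_init(void)
{
	int err;

	/* Allocate ballooned-out pages suitable for mapping grants into. */
	err = gnttab_alloc_pages(MY_NR_PAGES, my_pages);
	if (err)
		return err;		/* typically -ENOMEM */

	/* ... hand my_pages to gnttab_map_refs() as before ... */
	return 0;
}

static void my_backend_exit(void)
{
	/* Return the pages once all grant mappings have been torn down. */
	gnttab_free_pages(MY_NR_PAGES, my_pages);
}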

diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 63fc7f0..908e630 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -100,7 +100,7 @@ module_param(log_stats, int, 0644);
 
 #define BLKBACK_INVALID_HANDLE (~0)
 
-/* Number of free pages to remove on each call to free_xenballooned_pages */
+/* Number of free pages to remove on each call to gnttab_free_pages */
 #define NUM_BATCH_FREE_PAGES 10
 
 static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
@@ -111,7 +111,7 @@ static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
        if (list_empty(&blkif->free_pages)) {
                BUG_ON(blkif->free_pages_num != 0);
                spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
-               return alloc_xenballooned_pages(1, page, false);
+               return gnttab_alloc_pages(1, page);
        }
        BUG_ON(blkif->free_pages_num == 0);
        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
@@ -151,14 +151,14 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
                blkif->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
-                       free_xenballooned_pages(num_pages, page);
+                       gnttab_free_pages(num_pages, page);
                        spin_lock_irqsave(&blkif->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
        if (num_pages != 0)
-               free_xenballooned_pages(num_pages, page);
+               gnttab_free_pages(num_pages, page);
 }
 
 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9259a73..2e07f84 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
         * better enable it. The long term solution would be to use just a
         * bunch of valid page descriptors, without dependency on ballooning
         */
-       err = alloc_xenballooned_pages(MAX_PENDING_REQS,
-                                      queue->mmap_pages,
-                                      false);
+       err = gnttab_alloc_pages(MAX_PENDING_REQS,
+                                queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
@@ -662,7 +661,7 @@ void xenvif_disconnect(struct xenvif *vif)
  */
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
-       free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+       gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
 }
 
 void xenvif_free(struct xenvif *vif)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 32f6bfe..a28807a 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -119,7 +119,7 @@ static void gntdev_free_map(struct grant_map *map)
                return;
 
        if (map->pages)
-               free_xenballooned_pages(map->count, map->pages);
+               gnttab_free_pages(map->count, map->pages);
        kfree(map->pages);
        kfree(map->grants);
        kfree(map->map_ops);
@@ -152,7 +152,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
            NULL == add->pages)
                goto err;
 
-       if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
+       if (gnttab_alloc_pages(count, add->pages))
                goto err;
 
        for (i = 0; i < count; i++) {
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 999d7ab..8d6e97c 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -50,6 +50,7 @@
 #include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
 #include <xen/swiotlb-xen.h>
+#include <xen/balloon.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
@@ -671,6 +672,32 @@ void gnttab_free_auto_xlat_frames(void)
 }
 EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
 
+/**
+ * gnttab_alloc_pages - alloc pages suitable for mapping grants into
+ * @nr_pages: number of pages to alloc
+ * @pages: returns the pages
+ */
+int gnttab_alloc_pages(int nr_pages, struct page **pages)
+{
+       int ret;
+
+       ret = alloc_xenballooned_pages(nr_pages, pages, false);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/**
+ * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
+ * @nr_pages: number of pages to free
+ * @pages: the pages
+ */
+void gnttab_free_pages(int nr_pages, struct page **pages)
+{
+       free_xenballooned_pages(nr_pages, pages);
+}
+
 /* Handling of paged out grant targets (GNTST_eagain) */
 #define MAX_DELAY 256
 static inline void
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index e999496e..2a69e9e 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -227,7 +227,7 @@ static void put_free_pages(struct page **page, int num)
                return;
        if (i > scsiback_max_buffer_pages) {
                n = min(num, i - scsiback_max_buffer_pages);
-               free_xenballooned_pages(n, page + num - n);
+               gnttab_free_pages(n, page + num - n);
                n = num - n;
        }
        spin_lock_irqsave(&free_pages_lock, flags);
@@ -244,7 +244,7 @@ static int get_free_page(struct page **page)
        spin_lock_irqsave(&free_pages_lock, flags);
        if (list_empty(&scsiback_free_pages)) {
                spin_unlock_irqrestore(&free_pages_lock, flags);
-               return alloc_xenballooned_pages(1, page, false);
+               return gnttab_alloc_pages(1, page);
        }
        page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
        list_del(&page[0]->lru);
@@ -2106,7 +2106,7 @@ static void __exit scsiback_exit(void)
        while (free_pages_num) {
                if (get_free_page(&page))
                        BUG();
-               free_xenballooned_pages(1, &page);
+               gnttab_free_pages(1, &page);
        }
        scsiback_deregister_configfs();
        xenbus_unregister_driver(&scsiback_driver);
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 7235d8f..949803e 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -163,6 +163,9 @@ void gnttab_free_auto_xlat_frames(void);
 
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
+int gnttab_alloc_pages(int nr_pages, struct page **pages);
+void gnttab_free_pages(int nr_pages, struct page **pages);
+
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count);
-- 
1.7.10.4


