
[Xen-devel] [PATCH v3 3/8] xen: delay allocation of grant table sub-structures



Delay the allocation of the grant table sub-structures in order to
allow modifying the parameters needed for sizing these structures on a
per-domain basis. Do the allocation either from gnttab_grow_table() or
just before the domain is started for the first time.
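
To illustrate the resulting split between table creation and the
delayed initialisation, here is a simplified sketch (names follow the
patch; locking and most of the error handling are omitted, so this is
not the literal code from the diff below):

    /* domain_create(): only the bare structure is allocated up front. */
    int grant_table_create(struct domain *d)
    {
        struct grant_table *t = xzalloc(struct grant_table);

        if ( t == NULL )
            return -ENOMEM;
        percpu_rwlock_resource_init(&t->lock, grant_rwlock);
        spin_lock_init(&t->maptrack_lock);
        /* nr_grant_frames stays 0: sub-structures not allocated yet. */
        d->grant_table = t;
        return 0;
    }

    /*
     * grant_table_init(): called later, either from gnttab_grow_table()
     * or just before the domain is started for the first time, so the
     * sizing parameters can still be adjusted per domain in between.
     */
    int grant_table_init(struct domain *d)
    {
        struct grant_table *gt = d->grant_table;

        if ( gt->nr_grant_frames )
            return 0;    /* Already initialised. */

        /* ... allocate active, maptrack, shared and status frames ... */
        gt->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
        return 0;
    }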

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V3:
- move call of grant_table_init() from gnttab_setup_table() to
  gnttab_grow_table() (Paul Durrant); see the sketch below
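
With that move, gnttab_grow_table() becomes the single point where a
not-yet-initialised table is set up on demand. A minimal sketch of the
arrangement, simplified from the corresponding hunk below (the grant
table write lock is assumed held, and 0 is this function's existing
failure indicator):

    int gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
    {
        struct grant_table *gt = d->grant_table;

        /* Initialise the sub-structures on first use. */
        if ( !gt->nr_grant_frames && grant_table_init(d) )
        {
            gdprintk(XENLOG_INFO,
                     "Allocation failure in grant table init.\n");
            return 0;    /* Failure. */
        }

        ASSERT(req_nr_frames <= max_grant_frames);
        /* ... extend active, shared and status frame arrays ... */
        return 1;        /* Success. */
    }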
---
 xen/common/domain.c           |  17 +++++-
 xen/common/grant_table.c      | 138 ++++++++++++++++++++++++------------------
 xen/include/xen/grant_table.h |   2 +
 3 files changed, 96 insertions(+), 61 deletions(-)
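
The domain.c side is easier to follow with the two hunks read together;
here is a condensed sketch of the first-unpause path (simplified from
the hunks below, with the reference-counting loop elided):

    int domain_unpause_by_systemcontroller(struct domain *d)
    {
        int new;

        /* ... atomically decrement d->controller_pause_count into new ... */

        /*
         * Creation is considered finished when the controller pause
         * count first drops to 0; initialise the grant table exactly
         * once at that point.
         */
        if ( new == 0 && !d->creation_finished )
        {
            int ret = grant_table_init(d);

            if ( ret )
            {
                /*
                 * Undo the decrement; passing NULL skips pause_fn so
                 * the domain is not paused a second time.
                 */
                __domain_pause_by_systemcontroller(d, NULL);
                return ret;
            }
            d->creation_finished = true;
        }

        domain_unpause(d);
        return 0;
    }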

diff --git a/xen/common/domain.c b/xen/common/domain.c
index 5aebcf265f..11eb1778a3 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -363,6 +363,9 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
             goto fail;
         init_status |= INIT_gnttab;
 
+        if ( domid == 0 && grant_table_init(d) )
+            goto fail;
+
         poolid = 0;
 
         err = -ENOMEM;
@@ -998,7 +1001,8 @@ int __domain_pause_by_systemcontroller(struct domain *d,
         prev = cmpxchg(&d->controller_pause_count, old, new);
     } while ( prev != old );
 
-    pause_fn(d);
+    if ( pause_fn )
+        pause_fn(d);
 
     return 0;
 }
@@ -1006,6 +1010,7 @@ int __domain_pause_by_systemcontroller(struct domain *d,
 int domain_unpause_by_systemcontroller(struct domain *d)
 {
     int old, new, prev = d->controller_pause_count;
+    int ret;
 
     do
     {
@@ -1029,8 +1034,16 @@ int domain_unpause_by_systemcontroller(struct domain *d)
      * Creation is considered finished when the controller reference count
      * first drops to 0.
      */
-    if ( new == 0 )
+    if ( new == 0 && !d->creation_finished )
+    {
+        ret = grant_table_init(d);
+        if ( ret )
+        {
+            __domain_pause_by_systemcontroller(d, NULL);
+            return ret;
+        }
         d->creation_finished = true;
+    }
 
     domain_unpause(d);
 
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 4520e36d90..29e7fa539b 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1655,6 +1655,78 @@ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt)
     gt->nr_status_frames = 0;
 }
 
+int
+grant_table_init(struct domain *d)
+{
+    struct grant_table *gt = d->grant_table;
+    unsigned int i, j;
+
+    if ( gt->nr_grant_frames )
+        return 0;
+
+    gt->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
+
+    /* Active grant table. */
+    if ( (gt->active = xzalloc_array(struct active_grant_entry *,
+                                     max_nr_active_grant_frames)) == NULL )
+        goto no_mem_1;
+    for ( i = 0;
+          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
+    {
+        if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
+            goto no_mem_2;
+        clear_page(gt->active[i]);
+        for ( j = 0; j < ACGNT_PER_PAGE; j++ )
+            spin_lock_init(&gt->active[i][j].lock);
+    }
+
+    /* Tracking of mapped foreign frames table */
+    gt->maptrack = vzalloc(max_maptrack_frames * sizeof(*gt->maptrack));
+    if ( gt->maptrack == NULL )
+        goto no_mem_2;
+
+    /* Shared grant table. */
+    if ( (gt->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL )
+        goto no_mem_3;
+    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+    {
+        if ( (gt->shared_raw[i] = alloc_xenheap_page()) == NULL )
+            goto no_mem_4;
+        clear_page(gt->shared_raw[i]);
+    }
+
+    /* Status pages for grant table - for version 2 */
+    gt->status = xzalloc_array(grant_status_t *,
+                               grant_to_status_frames(max_grant_frames));
+    if ( gt->status == NULL )
+        goto no_mem_4;
+
+    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+        gnttab_create_shared_page(d, gt, i);
+
+    gt->nr_status_frames = 0;
+
+    return 0;
+
+ no_mem_4:
+    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+        free_xenheap_page(gt->shared_raw[i]);
+    xfree(gt->shared_raw);
+    gt->shared_raw = NULL;
+ no_mem_3:
+    vfree(gt->maptrack);
+    gt->maptrack = NULL;
+ no_mem_2:
+    for ( i = 0;
+          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
+        free_xenheap_page(gt->active[i]);
+    xfree(gt->active);
+    gt->active = NULL;
+ no_mem_1:
+    gt->nr_grant_frames = 0;
+    return -ENOMEM;
+}
+
 /*
  * Grow the grant table. The caller must hold the grant table's
  * write lock before calling this function.
@@ -1665,6 +1737,12 @@ gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
     struct grant_table *gt = d->grant_table;
     unsigned int i, j;
 
+    if ( !gt->nr_grant_frames && grant_table_init(d) )
+    {
+        gdprintk(XENLOG_INFO, "Allocation failure in grant table init.\n");
+        return 0;
+    }
+
     ASSERT(req_nr_frames <= max_grant_frames);
 
     gdprintk(XENLOG_INFO,
@@ -3380,75 +3458,17 @@ grant_table_create(
     struct domain *d)
 {
     struct grant_table *t;
-    unsigned int i, j;
 
     if ( (t = xzalloc(struct grant_table)) == NULL )
-        goto no_mem_0;
+        return -ENOMEM;
 
     /* Simple stuff. */
     percpu_rwlock_resource_init(&t->lock, grant_rwlock);
     spin_lock_init(&t->maptrack_lock);
-    t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
-
-    /* Active grant table. */
-    if ( (t->active = xzalloc_array(struct active_grant_entry *,
-                                    max_nr_active_grant_frames)) == NULL )
-        goto no_mem_1;
-    for ( i = 0;
-          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
-    {
-        if ( (t->active[i] = alloc_xenheap_page()) == NULL )
-            goto no_mem_2;
-        clear_page(t->active[i]);
-        for ( j = 0; j < ACGNT_PER_PAGE; j++ )
-            spin_lock_init(&t->active[i][j].lock);
-    }
-
-    /* Tracking of mapped foreign frames table */
-    t->maptrack = vzalloc(max_maptrack_frames * sizeof(*t->maptrack));
-    if ( t->maptrack == NULL )
-        goto no_mem_2;
-
-    /* Shared grant table. */
-    if ( (t->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL )
-        goto no_mem_3;
-    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
-    {
-        if ( (t->shared_raw[i] = alloc_xenheap_page()) == NULL )
-            goto no_mem_4;
-        clear_page(t->shared_raw[i]);
-    }
-
-    /* Status pages for grant table - for version 2 */
-    t->status = xzalloc_array(grant_status_t *,
-                              grant_to_status_frames(max_grant_frames));
-    if ( t->status == NULL )
-        goto no_mem_4;
-
-    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
-        gnttab_create_shared_page(d, t, i);
-
-    t->nr_status_frames = 0;
 
     /* Okay, install the structure. */
     d->grant_table = t;
     return 0;
-
- no_mem_4:
-    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
-        free_xenheap_page(t->shared_raw[i]);
-    xfree(t->shared_raw);
- no_mem_3:
-    vfree(t->maptrack);
- no_mem_2:
-    for ( i = 0;
-          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
-        free_xenheap_page(t->active[i]);
-    xfree(t->active);
- no_mem_1:
-    xfree(t);
- no_mem_0:
-    return -ENOMEM;
 }
 
 void
diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
index 43b07e60c5..84a8d61616 100644
--- a/xen/include/xen/grant_table.h
+++ b/xen/include/xen/grant_table.h
@@ -35,6 +35,8 @@ extern unsigned int max_grant_frames;
 /* Create/destroy per-domain grant table context. */
 int grant_table_create(
     struct domain *d);
+int grant_table_init(
+    struct domain *d);
 void grant_table_destroy(
     struct domain *d);
 void grant_table_init_vcpu(struct vcpu *v);
-- 
2.12.3

