[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v2 3/8] xen: delay allocation of grant table sub structures
> -----Original Message----- > From: Xen-devel [mailto:xen-devel-bounces@xxxxxxxxxxxxx] On Behalf Of > Juergen Gross > Sent: 06 September 2017 09:26 > To: xen-devel@xxxxxxxxxxxxx > Cc: Juergen Gross <jgross@xxxxxxxx>; sstabellini@xxxxxxxxxx; Wei Liu > <wei.liu2@xxxxxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxx>; > Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Ian Jackson > <Ian.Jackson@xxxxxxxxxx>; Tim (Xen.org) <tim@xxxxxxx>; > jbeulich@xxxxxxxx > Subject: [Xen-devel] [PATCH v2 3/8] xen: delay allocation of grant table sub > structures > > Delay the allocation of the grant table sub structures in order to > allow modifying parameters needed for sizing of these structures at a > per domain basis. Either do it from gnttab_setup_table() or just > before the domain is started the first time. Why does it need to be in two places? Could it not just be done on the first call to gnttab_grow_table()? (My new direct mapping series drops the call to gnttab_setup_table() if the new API is available). 
Paul > > Signed-off-by: Juergen Gross <jgross@xxxxxxxx> > --- > xen/common/domain.c | 17 +++++- > xen/common/grant_table.c | 139 ++++++++++++++++++++++++---------- > -------- > xen/include/xen/grant_table.h | 2 + > 3 files changed, 97 insertions(+), 61 deletions(-) > > diff --git a/xen/common/domain.c b/xen/common/domain.c > index 5aebcf265f..11eb1778a3 100644 > --- a/xen/common/domain.c > +++ b/xen/common/domain.c > @@ -363,6 +363,9 @@ struct domain *domain_create(domid_t domid, > unsigned int domcr_flags, > goto fail; > init_status |= INIT_gnttab; > > + if ( domid == 0 && grant_table_init(d) ) > + goto fail; > + > poolid = 0; > > err = -ENOMEM; > @@ -998,7 +1001,8 @@ int __domain_pause_by_systemcontroller(struct > domain *d, > prev = cmpxchg(&d->controller_pause_count, old, new); > } while ( prev != old ); > > - pause_fn(d); > + if ( pause_fn ) > + pause_fn(d); > > return 0; > } > @@ -1006,6 +1010,7 @@ int __domain_pause_by_systemcontroller(struct > domain *d, > int domain_unpause_by_systemcontroller(struct domain *d) > { > int old, new, prev = d->controller_pause_count; > + int ret; > > do > { > @@ -1029,8 +1034,16 @@ int domain_unpause_by_systemcontroller(struct > domain *d) > * Creation is considered finished when the controller reference count > * first drops to 0. 
> */ > - if ( new == 0 ) > + if ( new == 0 && !d->creation_finished ) > + { > + ret = grant_table_init(d); > + if ( ret ) > + { > + __domain_pause_by_systemcontroller(d, NULL); > + return ret; > + } > d->creation_finished = true; > + } > > domain_unpause(d); > > diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c > index a94dfdda7b..b50c334f70 100644 > --- a/xen/common/grant_table.c > +++ b/xen/common/grant_table.c > @@ -1719,6 +1719,78 @@ active_alloc_failed: > return 0; > } > > +int > +grant_table_init(struct domain *d) > +{ > + struct grant_table *gt = d->grant_table; > + unsigned int i, j; > + > + if ( gt->nr_grant_frames ) > + return 0; > + > + gt->nr_grant_frames = INITIAL_NR_GRANT_FRAMES; > + > + /* Active grant table. */ > + if ( (gt->active = xzalloc_array(struct active_grant_entry *, > + max_nr_active_grant_frames)) == NULL ) > + goto no_mem_1; > + for ( i = 0; > + i < > num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ ) > + { > + if ( (gt->active[i] = alloc_xenheap_page()) == NULL ) > + goto no_mem_2; > + clear_page(gt->active[i]); > + for ( j = 0; j < ACGNT_PER_PAGE; j++ ) > + spin_lock_init(&gt->active[i][j].lock); > + } > + > + /* Tracking of mapped foreign frames table */ > + gt->maptrack = vzalloc(max_maptrack_frames * sizeof(*gt->maptrack)); > + if ( gt->maptrack == NULL ) > + goto no_mem_2; > + > + /* Shared grant table. 
*/ > + if ( (gt->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL > ) > + goto no_mem_3; > + for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ ) > + { > + if ( (gt->shared_raw[i] = alloc_xenheap_page()) == NULL ) > + goto no_mem_4; > + clear_page(gt->shared_raw[i]); > + } > + > + /* Status pages for grant table - for version 2 */ > + gt->status = xzalloc_array(grant_status_t *, > + grant_to_status_frames(max_grant_frames)); > + if ( gt->status == NULL ) > + goto no_mem_4; > + > + for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ ) > + gnttab_create_shared_page(d, gt, i); > + > + gt->nr_status_frames = 0; > + > + return 0; > + > + no_mem_4: > + for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ ) > + free_xenheap_page(gt->shared_raw[i]); > + xfree(gt->shared_raw); > + gt->shared_raw = NULL; > + no_mem_3: > + vfree(gt->maptrack); > + gt->maptrack = NULL; > + no_mem_2: > + for ( i = 0; > + i < > num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ ) > + free_xenheap_page(gt->active[i]); > + xfree(gt->active); > + gt->active = NULL; > + no_mem_1: > + gt->nr_grant_frames = 0; > + return -ENOMEM; > +} > + > static long > gnttab_setup_table( > XEN_GUEST_HANDLE_PARAM(gnttab_setup_table_t) uop, unsigned int > count) > @@ -1764,7 +1836,14 @@ gnttab_setup_table( > grant_write_lock(gt); > > if ( gt->gt_version == 0 ) > + { > + if ( grant_table_init(d) ) > + { > + op.status = GNTST_general_error; > + goto unlock; > + } > gt->gt_version = 1; > + } > > if ( (op.nr_frames > nr_grant_frames(gt) || > ((gt->gt_version > 1) && > @@ -3378,75 +3457,17 @@ grant_table_create( > struct domain *d) > { > struct grant_table *t; > - unsigned int i, j; > > if ( (t = xzalloc(struct grant_table)) == NULL ) > - goto no_mem_0; > + return -ENOMEM; > > /* Simple stuff. */ > percpu_rwlock_resource_init(&t->lock, grant_rwlock); > spin_lock_init(&t->maptrack_lock); > - t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES; > - > - /* Active grant table. 
*/ > - if ( (t->active = xzalloc_array(struct active_grant_entry *, > - max_nr_active_grant_frames)) == NULL ) > - goto no_mem_1; > - for ( i = 0; > - i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); > i++ ) > - { > - if ( (t->active[i] = alloc_xenheap_page()) == NULL ) > - goto no_mem_2; > - clear_page(t->active[i]); > - for ( j = 0; j < ACGNT_PER_PAGE; j++ ) > - spin_lock_init(&t->active[i][j].lock); > - } > - > - /* Tracking of mapped foreign frames table */ > - t->maptrack = vzalloc(max_maptrack_frames * sizeof(*t->maptrack)); > - if ( t->maptrack == NULL ) > - goto no_mem_2; > - > - /* Shared grant table. */ > - if ( (t->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL ) > - goto no_mem_3; > - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ ) > - { > - if ( (t->shared_raw[i] = alloc_xenheap_page()) == NULL ) > - goto no_mem_4; > - clear_page(t->shared_raw[i]); > - } > - > - /* Status pages for grant table - for version 2 */ > - t->status = xzalloc_array(grant_status_t *, > - grant_to_status_frames(max_grant_frames)); > - if ( t->status == NULL ) > - goto no_mem_4; > - > - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ ) > - gnttab_create_shared_page(d, t, i); > - > - t->nr_status_frames = 0; > > /* Okay, install the structure. 
*/ > d->grant_table = t; > return 0; > - > - no_mem_4: > - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ ) > - free_xenheap_page(t->shared_raw[i]); > - xfree(t->shared_raw); > - no_mem_3: > - vfree(t->maptrack); > - no_mem_2: > - for ( i = 0; > - i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); > i++ ) > - free_xenheap_page(t->active[i]); > - xfree(t->active); > - no_mem_1: > - xfree(t); > - no_mem_0: > - return -ENOMEM; > } > > void > diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h > index 43b07e60c5..84a8d61616 100644 > --- a/xen/include/xen/grant_table.h > +++ b/xen/include/xen/grant_table.h > @@ -35,6 +35,8 @@ extern unsigned int max_grant_frames; > /* Create/destroy per-domain grant table context. */ > int grant_table_create( > struct domain *d); > +int grant_table_init( > + struct domain *d); > void grant_table_destroy( > struct domain *d); > void grant_table_init_vcpu(struct vcpu *v); > -- > 2.12.3 > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@xxxxxxxxxxxxx > https://lists.xen.org/xen-devel _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.