[Xen-devel] [PATCH v2.1 04/15] tmem: bugfix in obj allocate path
From: Bob Liu <lliubbo@xxxxxxxxx>

There is a potential bug in the obj allocate path. When parallel callers
allocate an obj with the same oid and insert it into pool->obj_rb_root, an
unexpected obj might be returned:

Caller A:                                Caller B:
obj_find(oidp) == NULL                   obj_find(oidp) == NULL
write_lock(&pool->pool_rwlock)
obj_new():
    objA = tmem_malloc()
    obj_rb_insert(objA)
write_unlock()
                                         write_lock(&pool->pool_rwlock)
                                         obj_new():
                                             objB = tmem_malloc()
                                             obj_rb_insert(objB)
                                         write_unlock()
Continue write data to objA

But in any future obj_find(), objB will always be returned.

The root cause is that the allocate path didn't check the return value of
obj_rb_insert(). This patch fixes that and renames obj_new() to the more
descriptive obj_alloc().

Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 xen/common/tmem.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 8c788ac..39ffe17 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -959,12 +959,11 @@ static int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj)
  * allocate, initialize, and insert an tmem_object_root
  * (should be called only if find failed)
  */
-static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_alloc(struct tmem_pool *pool, struct oid *oidp)
 {
     struct tmem_object_root *obj;
 
     ASSERT(pool != NULL);
-    ASSERT_WRITELOCK(&pool->pool_rwlock);
     if ( (obj = tmem_malloc(sizeof(struct tmem_object_root), pool)) == NULL )
         return NULL;
     pool->obj_count++;
@@ -979,9 +978,6 @@ static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oid
     obj->objnode_count = 0;
     obj->pgp_count = 0;
     obj->last_client = TMEM_CLI_ID_NULL;
-    spin_lock(&obj->obj_spinlock);
-    obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj);
-    ASSERT_SPINLOCK(&obj->obj_spinlock);
     return obj;
 }
 
@@ -1552,10 +1548,13 @@ static int do_tmem_put(struct tmem_pool *pool,
 
     ASSERT(pool != NULL);
     client = pool->client;
+    ASSERT(client != NULL);
     ret = client->frozen ? -EFROZEN : -ENOMEM;
     pool->puts++;
+
+refind:
     /* does page already exist (dup)? if so, handle specially */
-    if ( (obj = obj_find(pool,oidp)) != NULL )
+    if ( (obj = obj_find(pool, oidp)) != NULL )
     {
         if ((pgp = pgp_lookup_in_obj(obj, index)) != NULL)
         {
@@ -1573,12 +1572,22 @@ static int do_tmem_put(struct tmem_pool *pool,
     /* no puts allowed into a frozen pool (except dup puts) */
     if ( client->frozen )
         return ret;
+    if ( (obj = obj_alloc(pool, oidp)) == NULL )
+        return -ENOMEM;
+
     write_lock(&pool->pool_rwlock);
-    if ( (obj = obj_new(pool,oidp)) == NULL )
+    /*
+     * Parallel callers may have already allocated an obj with this oid and
+     * inserted it into obj_rb_root before us.
+     */
+    if (!obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj))
     {
+        tmem_free(obj, pool);
         write_unlock(&pool->pool_rwlock);
-        return -ENOMEM;
+        goto refind;
     }
+
+    spin_lock(&obj->obj_spinlock);
     newobj = 1;
     write_unlock(&pool->pool_rwlock);
     }
-- 
1.8.5.3
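For anyone tracing the race on its own, below is a minimal standalone sketch,
not Xen code: a pthread rwlock and a plain linked list stand in for
pool->pool_rwlock and the obj_rb_root trees, the helper names (bucket,
obj_insert, obj_get_or_alloc) are simplified inventions, and refcounting and
obj-lifetime handling are omitted. It only illustrates the
allocate-before-lock, insert-or-retry pattern that the patch switches
do_tmem_put() to.

/* Standalone illustration only -- simplified types and helpers, not Xen code. */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct obj {
    uint64_t oid;
    struct obj *next;
};

static struct obj *bucket;                 /* stand-in for pool->obj_rb_root */
static pthread_rwlock_t pool_rwlock = PTHREAD_RWLOCK_INITIALIZER;

/* Return the existing obj with this oid, or NULL (lifetime handling omitted). */
static struct obj *obj_find(uint64_t oid)
{
    struct obj *o;

    pthread_rwlock_rdlock(&pool_rwlock);
    for ( o = bucket; o != NULL; o = o->next )
        if ( o->oid == oid )
            break;
    pthread_rwlock_unlock(&pool_rwlock);
    return o;
}

/* Insert obj unless the oid is already present; mirrors obj_rb_insert()'s
 * 0-on-duplicate return.  Caller must hold pool_rwlock for writing. */
static int obj_insert(struct obj *obj)
{
    struct obj *o;

    for ( o = bucket; o != NULL; o = o->next )
        if ( o->oid == obj->oid )
            return 0;
    obj->next = bucket;
    bucket = obj;
    return 1;
}

/* The pattern from the patch: allocate before taking the write lock, then
 * either win the insert race or free our copy and look up the winner. */
static struct obj *obj_get_or_alloc(uint64_t oid)
{
    struct obj *obj;

 refind:
    if ( (obj = obj_find(oid)) != NULL )
        return obj;                        /* someone else created it already */

    if ( (obj = calloc(1, sizeof(*obj))) == NULL )
        return NULL;
    obj->oid = oid;

    pthread_rwlock_wrlock(&pool_rwlock);
    if ( !obj_insert(obj) )
    {
        /* A parallel caller inserted the same oid first: drop ours, retry. */
        free(obj);
        pthread_rwlock_unlock(&pool_rwlock);
        goto refind;
    }
    pthread_rwlock_unlock(&pool_rwlock);
    return obj;
}

int main(void)
{
    struct obj *a = obj_get_or_alloc(42);
    struct obj *b = obj_get_or_alloc(42);

    return (a != NULL && a == b) ? 0 : 1;  /* same oid must map to same obj */
}

Allocating outside the pool-wide write lock keeps the allocation off the
lock's critical path, and the caller that loses the race frees its copy and
re-runs the lookup, so every caller ends up operating on the single obj that
actually made it into the tree.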