Re: [Xen-devel] [PATCH v3 09/15] tmem: cleanup: drop tmem_lock_all
On 11/12/13 08:50, Bob Liu wrote:
> tmem_lock_all is used for debug only; remove it from upstream to make
> the tmem source code more readable and easier to maintain.
> no_evict is meaningless without tmem_lock_all, so this patch removes
> it as well.
>
> Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>

This should probably be tagged with Coverity ID 1055654, which is a
locking-order reversal between tmem_spinlock and the heap_lock.

I certainly think this is an appropriate fix.

~Andrew
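For anyone reading along without the Coverity report to hand: the bug
class here is the classic AB-BA inversion. A minimal sketch of the
pattern, with placeholder locks standing in for tmem_spinlock and the
heap_lock (illustrative only, not the actual call sites):

    static DEFINE_SPINLOCK(lock_a);    /* stands in for tmem_spinlock */
    static DEFINE_SPINLOCK(lock_b);    /* stands in for the heap_lock */

    void path_one(void)
    {
        spin_lock(&lock_a);            /* takes A ... */
        spin_lock(&lock_b);            /* ... then B */
        /* ... critical section ... */
        spin_unlock(&lock_b);
        spin_unlock(&lock_a);
    }

    void path_two(void)
    {
        spin_lock(&lock_b);            /* takes B ... */
        spin_lock(&lock_a);            /* ... then A: deadlocks if
                                        * path_one already holds A and
                                        * is spinning on B */
        spin_unlock(&lock_a);
        spin_unlock(&lock_b);
    }

Deleting tmem_spinlock outright removes one side of the cycle, which is
why this cleanup also clears the report.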
> ---
>  xen/common/tmem.c          | 281 ++++++++++++++++----------------------------
>  xen/common/tmem_xen.c      |   3 -
>  xen/include/xen/tmem_xen.h |   8 --
>  3 files changed, 101 insertions(+), 191 deletions(-)
>
> diff --git a/xen/common/tmem.c b/xen/common/tmem.c
> index 205ee95..a09a506 100644
> --- a/xen/common/tmem.c
> +++ b/xen/common/tmem.c
> @@ -98,7 +98,6 @@ struct tmem_object_root {
>      struct tmem_pool *pool;
>      domid_t last_client;
>      spinlock_t obj_spinlock;
> -    bool_t no_evict;   /* if globally locked, pseudo-locks against eviction */
>  };
>
>  struct tmem_object_node {
> @@ -167,22 +166,12 @@ static atomic_t client_weight_total = ATOMIC_INIT(0);
>  static int tmem_initialized = 0;
>
>  /************ CONCURRENCY ***********************************************/
> -DEFINE_SPINLOCK(tmem_spinlock);  /* used iff tmem_lock_all */
> -DEFINE_RWLOCK(tmem_rwlock);      /* used iff !tmem_lock_all */
> +DEFINE_RWLOCK(tmem_rwlock);
>  static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */
>  static DEFINE_SPINLOCK(pers_lists_spinlock);
>
> -#define tmem_spin_lock(_l)     do {if (!tmem_lock_all) spin_lock(_l);}while(0)
> -#define tmem_spin_unlock(_l)   do {if (!tmem_lock_all) spin_unlock(_l);}while(0)
> -#define tmem_read_lock(_l)     do {if (!tmem_lock_all) read_lock(_l);}while(0)
> -#define tmem_read_unlock(_l)   do {if (!tmem_lock_all) read_unlock(_l);}while(0)
> -#define tmem_write_lock(_l)    do {if (!tmem_lock_all) write_lock(_l);}while(0)
> -#define tmem_write_unlock(_l)  do {if (!tmem_lock_all) write_unlock(_l);}while(0)
> -#define tmem_write_trylock(_l) ((tmem_lock_all)?1:write_trylock(_l))
> -#define tmem_spin_trylock(_l)  (tmem_lock_all?1:spin_trylock(_l))
> -
> -#define ASSERT_SPINLOCK(_l) ASSERT(tmem_lock_all || spin_is_locked(_l))
> -#define ASSERT_WRITELOCK(_l) ASSERT(tmem_lock_all || rw_is_write_locked(_l))
> +#define ASSERT_SPINLOCK(_l) ASSERT(spin_is_locked(_l))
> +#define ASSERT_WRITELOCK(_l) ASSERT(rw_is_write_locked(_l))
>
>  /* global counters (should use long_atomic_t access) */
>  static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */
> @@ -250,7 +239,7 @@ static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp)
>      int ret;
>
>      ASSERT(tmem_dedup_enabled());
> -    tmem_read_lock(&pcd_tree_rwlocks[firstbyte]);
> +    read_lock(&pcd_tree_rwlocks[firstbyte]);
>      pcd = pgp->pcd;
>      if ( pgp->size < PAGE_SIZE && pgp->size != 0 &&
>           pcd->size < PAGE_SIZE && pcd->size != 0 )
> @@ -260,7 +249,7 @@ static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp)
>          ret = tmem_copy_tze_to_client(cmfn, pcd->tze, pcd->size);
>      else
>          ret = tmem_copy_to_client(cmfn, pcd->pfp, tmem_cli_buf_null);
> -    tmem_read_unlock(&pcd_tree_rwlocks[firstbyte]);
> +    read_unlock(&pcd_tree_rwlocks[firstbyte]);
>      return ret;
>  }
>
> @@ -283,14 +272,14 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool
>      if ( have_pcd_rwlock )
>          ASSERT_WRITELOCK(&pcd_tree_rwlocks[firstbyte]);
>      else
> -        tmem_write_lock(&pcd_tree_rwlocks[firstbyte]);
> +        write_lock(&pcd_tree_rwlocks[firstbyte]);
>      list_del_init(&pgp->pcd_siblings);
>      pgp->pcd = NULL;
>      pgp->firstbyte = NOT_SHAREABLE;
>      pgp->size = -1;
>      if ( --pcd->pgp_ref_count )
>      {
> -        tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]);
> +        write_unlock(&pcd_tree_rwlocks[firstbyte]);
>          return;
>      }
>
> @@ -317,7 +306,7 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool
>          /* real physical page */
>          tmem_page_free(pool,pfp);
>      }
> -    tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]);
> +    write_unlock(&pcd_tree_rwlocks[firstbyte]);
>  }
>
>
> @@ -349,7 +338,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
>          ASSERT(pfp_size <= PAGE_SIZE);
>          ASSERT(!(pfp_size & (sizeof(uint64_t)-1)));
>      }
> -    tmem_write_lock(&pcd_tree_rwlocks[firstbyte]);
> +    write_lock(&pcd_tree_rwlocks[firstbyte]);
>
>      /* look for page match */
>      root = &pcd_tree_roots[firstbyte];
> @@ -443,7 +432,7 @@ match:
>      pgp->pcd = pcd;
>
>  unlock:
> -    tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]);
> +    write_unlock(&pcd_tree_rwlocks[firstbyte]);
>      return ret;
>  }
>
> @@ -552,7 +541,7 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
>      if ( !is_persistent(pgp->us.obj->pool) )
>      {
>          if ( !no_eph_lock )
> -            tmem_spin_lock(&eph_lists_spinlock);
> +            spin_lock(&eph_lists_spinlock);
>          if ( !list_empty(&pgp->us.client_eph_pages) )
>              client->eph_count--;
>          ASSERT(client->eph_count >= 0);
> @@ -562,20 +551,20 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
>          ASSERT(global_eph_count >= 0);
>          list_del_init(&pgp->global_eph_pages);
>          if ( !no_eph_lock )
> -            tmem_spin_unlock(&eph_lists_spinlock);
> +            spin_unlock(&eph_lists_spinlock);
>      } else {
>          if ( client->live_migrating )
>          {
> -            tmem_spin_lock(&pers_lists_spinlock);
> +            spin_lock(&pers_lists_spinlock);
>              list_add_tail(&pgp->client_inv_pages,
>                            &client->persistent_invalidated_list);
>              if ( pgp != pgp->us.obj->pool->cur_pgp )
>                  list_del_init(&pgp->us.pool_pers_pages);
> -            tmem_spin_unlock(&pers_lists_spinlock);
> +            spin_unlock(&pers_lists_spinlock);
>          } else {
> -            tmem_spin_lock(&pers_lists_spinlock);
> +            spin_lock(&pers_lists_spinlock);
>              list_del_init(&pgp->us.pool_pers_pages);
> -            tmem_spin_unlock(&pers_lists_spinlock);
> +            spin_unlock(&pers_lists_spinlock);
>          }
>      }
>  }
> @@ -709,7 +698,7 @@ static struct tmem_object_root * obj_find(struct tmem_pool *pool, struct oid *oi
>      struct tmem_object_root *obj;
>
>  restart_find:
> -    tmem_read_lock(&pool->pool_rwlock);
> +    read_lock(&pool->pool_rwlock);
>      node = pool->obj_rb_root[oid_hash(oidp)].rb_node;
>      while ( node )
>      {
> @@ -717,17 +706,12 @@ restart_find:
>          switch ( oid_compare(&obj->oid, oidp) )
>          {
>              case 0: /* equal */
> -                if ( tmem_lock_all )
> -                    obj->no_evict = 1;
> -                else
> +                if ( !spin_trylock(&obj->obj_spinlock) )
>                  {
> -                    if ( !tmem_spin_trylock(&obj->obj_spinlock) )
> -                    {
> -                        tmem_read_unlock(&pool->pool_rwlock);
> -                        goto restart_find;
> -                    }
> -                    tmem_read_unlock(&pool->pool_rwlock);
> +                    read_unlock(&pool->pool_rwlock);
> +                    goto restart_find;
>                  }
> +                read_unlock(&pool->pool_rwlock);
>                  return obj;
>              case -1:
>                  node = node->rb_left;
> @@ -736,7 +720,7 @@
>                  node = node->rb_right;
>          }
>      }
> -    tmem_read_unlock(&pool->pool_rwlock);
> +    read_unlock(&pool->pool_rwlock);
>      return NULL;
>  }
>
> @@ -763,7 +747,7 @@ static void obj_free(struct tmem_object_root *obj, int no_rebalance)
>      /* use no_rebalance only if all objects are being destroyed anyway */
>      if ( !no_rebalance )
>          rb_erase(&obj->rb_tree_node,&pool->obj_rb_root[oid_hash(&old_oid)]);
> -    tmem_spin_unlock(&obj->obj_spinlock);
> +    spin_unlock(&obj->obj_spinlock);
>      tmem_free(obj, pool);
>  }
>
> @@ -813,9 +797,8 @@ static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oid
>      obj->oid = *oidp;
>      obj->pgp_count = 0;
>      obj->last_client = TMEM_CLI_ID_NULL;
> -    tmem_spin_lock(&obj->obj_spinlock);
> +    spin_lock(&obj->obj_spinlock);
>      obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj);
> -    obj->no_evict = 1;
>      ASSERT_SPINLOCK(&obj->obj_spinlock);
>      return obj;
>  }
> @@ -835,7 +818,7 @@ static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t
>      struct tmem_object_root *obj;
>      int i;
>
> -    tmem_write_lock(&pool->pool_rwlock);
> +    write_lock(&pool->pool_rwlock);
>      pool->is_dying = 1;
>      for (i = 0; i < OBJ_HASH_BUCKETS; i++)
>      {
> @@ -843,19 +826,18 @@ static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t
>          while ( node != NULL )
>          {
>              obj = container_of(node, struct tmem_object_root, rb_tree_node);
> -            tmem_spin_lock(&obj->obj_spinlock);
> +            spin_lock(&obj->obj_spinlock);
>              node = rb_next(node);
> -            ASSERT(obj->no_evict == 0);
>              if ( !selective )
>                  /* FIXME: should be obj,1 but walking/erasing rbtree is racy */
>                  obj_destroy(obj,0);
>              else if ( obj->last_client == cli_id )
>                  obj_destroy(obj,0);
>              else
> -                tmem_spin_unlock(&obj->obj_spinlock);
> +                spin_unlock(&obj->obj_spinlock);
>          }
>      }
> -    tmem_write_unlock(&pool->pool_rwlock);
> +    write_unlock(&pool->pool_rwlock);
>  }
>
> @@ -1114,9 +1096,7 @@ static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *ho
>
>      if ( pool->is_dying )
>          return 0;
> -    if ( tmem_lock_all && !obj->no_evict )
> -        return 1;
> -    if ( tmem_spin_trylock(&obj->obj_spinlock) )
> +    if ( spin_trylock(&obj->obj_spinlock) )
>      {
>          if ( tmem_dedup_enabled() )
>          {
> @@ -1124,7 +1104,7 @@ static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *ho
>              if ( firstbyte == NOT_SHAREABLE )
>                  goto obj_unlock;
>              ASSERT(firstbyte < 256);
> -            if ( !tmem_write_trylock(&pcd_tree_rwlocks[firstbyte]) )
> +            if ( !write_trylock(&pcd_tree_rwlocks[firstbyte]) )
>                  goto obj_unlock;
>              if ( pgp->pcd->pgp_ref_count > 1 && !pgp->eviction_attempted )
>              {
> @@ -1138,15 +1118,15 @@ static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *ho
>          }
>          if ( obj->pgp_count > 1 )
>              return 1;
> -        if ( tmem_write_trylock(&pool->pool_rwlock) )
> +        if ( write_trylock(&pool->pool_rwlock) )
>          {
>              *hold_pool_rwlock = 1;
>              return 1;
>          }
>  pcd_unlock:
> -        tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]);
> +        write_unlock(&pcd_tree_rwlocks[firstbyte]);
>  obj_unlock:
> -        tmem_spin_unlock(&obj->obj_spinlock);
> +        spin_unlock(&obj->obj_spinlock);
>      }
>      return 0;
>  }
> @@ -1160,7 +1140,7 @@ static int tmem_evict(void)
>      int ret = 0;
>      bool_t hold_pool_rwlock = 0;
>
> -    tmem_spin_lock(&eph_lists_spinlock);
> +    spin_lock(&eph_lists_spinlock);
>      if ( (client != NULL) && client_over_quota(client) &&
>           !list_empty(&client->ephemeral_page_list) )
>      {
> @@ -1182,7 +1162,6 @@ found:
>      ASSERT(pgp != NULL);
>      obj = pgp->us.obj;
>      ASSERT(obj != NULL);
> -    ASSERT(obj->no_evict == 0);
>      ASSERT(obj->pool != NULL);
>      pool = obj->pool;
>
> @@ -1201,13 +1180,13 @@ found:
>          obj_free(obj,0);
>      }
>      else
> -        tmem_spin_unlock(&obj->obj_spinlock);
> +        spin_unlock(&obj->obj_spinlock);
>      if ( hold_pool_rwlock )
> -        tmem_write_unlock(&pool->pool_rwlock);
> +        write_unlock(&pool->pool_rwlock);
>      ret = 1;
>
>  out:
> -    tmem_spin_unlock(&eph_lists_spinlock);
> +    spin_unlock(&eph_lists_spinlock);
>      return ret;
>  }
>
> @@ -1348,8 +1327,7 @@ done:
>      /* successfully replaced data, clean up and return success */
>      if ( is_shared(pool) )
>          obj->last_client = client->cli_id;
> -    obj->no_evict = 0;
> -    tmem_spin_unlock(&obj->obj_spinlock);
> +    spin_unlock(&obj->obj_spinlock);
>      return 1;
>
>  failed_dup:
> @@ -1362,12 +1340,11 @@ cleanup:
>      pgp_delete(pgpfound,0);
>      if ( obj->pgp_count == 0 )
>      {
> -        tmem_write_lock(&pool->pool_rwlock);
> +        write_lock(&pool->pool_rwlock);
>          obj_free(obj,0);
> -        tmem_write_unlock(&pool->pool_rwlock);
> +        write_unlock(&pool->pool_rwlock);
>      } else {
> -        obj->no_evict = 0;
> -        tmem_spin_unlock(&obj->obj_spinlock);
> +        spin_unlock(&obj->obj_spinlock);
>      }
>      return ret;
>  }
> @@ -1403,14 +1380,14 @@ static int do_tmem_put(struct tmem_pool *pool,
>          /* no puts allowed into a frozen pool (except dup puts) */
>          if ( client->frozen )
>              return ret;
> -        tmem_write_lock(&pool->pool_rwlock);
> +        write_lock(&pool->pool_rwlock);
>          if ( (obj = obj_new(pool,oidp)) == NULL )
>          {
> -            tmem_write_unlock(&pool->pool_rwlock);
> +            write_unlock(&pool->pool_rwlock);
>              return -ENOMEM;
>          }
>          newobj = 1;
> -        tmem_write_unlock(&pool->pool_rwlock);
> +        write_unlock(&pool->pool_rwlock);
>      }
>
>      /* When arrive here, we have a spinlocked obj for use */
> @@ -1463,29 +1440,28 @@ copy_uncompressed:
>  insert_page:
>      if ( !is_persistent(pool) )
>      {
> -        tmem_spin_lock(&eph_lists_spinlock);
> +        spin_lock(&eph_lists_spinlock);
>          list_add_tail(&pgp->global_eph_pages,
>              &global_ephemeral_page_list);
>          ++global_eph_count;
>          list_add_tail(&pgp->us.client_eph_pages,
>              &client->ephemeral_page_list);
>          ++client->eph_count;
> -        tmem_spin_unlock(&eph_lists_spinlock);
> +        spin_unlock(&eph_lists_spinlock);
>      }
>      else
>      { /* is_persistent */
> -        tmem_spin_lock(&pers_lists_spinlock);
> +        spin_lock(&pers_lists_spinlock);
>          list_add_tail(&pgp->us.pool_pers_pages,
>              &pool->persistent_page_list);
> -        tmem_spin_unlock(&pers_lists_spinlock);
> +        spin_unlock(&pers_lists_spinlock);
>      }
>
>      if ( is_shared(pool) )
>          obj->last_client = client->cli_id;
> -    obj->no_evict = 0;
>
>      /* free the obj spinlock */
> -    tmem_spin_unlock(&obj->obj_spinlock);
> +    spin_unlock(&obj->obj_spinlock);
>      return 1;
>
>  del_pgp_from_obj:
> @@ -1497,14 +1473,13 @@ free_pgp:
>  unlock_obj:
>      if ( newobj )
>      {
> -        tmem_write_lock(&pool->pool_rwlock);
> +        write_lock(&pool->pool_rwlock);
>          obj_free(obj, 0);
> -        tmem_write_unlock(&pool->pool_rwlock);
> +        write_unlock(&pool->pool_rwlock);
>      }
>      else
>      {
> -        obj->no_evict = 0;
> -        tmem_spin_unlock(&obj->obj_spinlock);
> +        spin_unlock(&obj->obj_spinlock);
>      }
>      return ret;
>  }
> @@ -1531,8 +1506,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
>      pgp = pgp_delete_from_obj(obj, index);
>      if ( pgp == NULL )
>      {
> -        obj->no_evict = 0;
> -        tmem_spin_unlock(&obj->obj_spinlock);
> +        spin_unlock(&obj->obj_spinlock);
>          return 0;
>      }
>      ASSERT(pgp->size != -1);
> @@ -1555,31 +1529,29 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
>              pgp_delete(pgp,0);
>              if ( obj->pgp_count == 0 )
>              {
> -                tmem_write_lock(&pool->pool_rwlock);
> +                write_lock(&pool->pool_rwlock);
>                  obj_free(obj,0);
>                  obj = NULL;
> -                tmem_write_unlock(&pool->pool_rwlock);
> +                write_unlock(&pool->pool_rwlock);
>              }
>          } else {
> -            tmem_spin_lock(&eph_lists_spinlock);
> +            spin_lock(&eph_lists_spinlock);
>              list_del(&pgp->global_eph_pages);
>              list_add_tail(&pgp->global_eph_pages,&global_ephemeral_page_list);
>              list_del(&pgp->us.client_eph_pages);
>              list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list);
> -            tmem_spin_unlock(&eph_lists_spinlock);
> +            spin_unlock(&eph_lists_spinlock);
>              obj->last_client = tmem_get_cli_id_from_current();
>          }
>      }
>      if ( obj != NULL )
>      {
> -        obj->no_evict = 0;
> -        tmem_spin_unlock(&obj->obj_spinlock);
> +        spin_unlock(&obj->obj_spinlock);
>      }
>      return 1;
>
>  out:
> -    obj->no_evict = 0;
> -    tmem_spin_unlock(&obj->obj_spinlock);
> +    spin_unlock(&obj->obj_spinlock);
>      return rc;
>  }
>
> @@ -1594,19 +1566,17 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t
>      pgp = pgp_delete_from_obj(obj, index);
>      if ( pgp == NULL )
>      {
> -        obj->no_evict = 0;
> -        tmem_spin_unlock(&obj->obj_spinlock);
> +        spin_unlock(&obj->obj_spinlock);
>          goto out;
>      }
>      pgp_delete(pgp,0);
>      if ( obj->pgp_count == 0 )
>      {
> -        tmem_write_lock(&pool->pool_rwlock);
> +        write_lock(&pool->pool_rwlock);
>          obj_free(obj,0);
> -        tmem_write_unlock(&pool->pool_rwlock);
> +        write_unlock(&pool->pool_rwlock);
>      } else {
> -        obj->no_evict = 0;
> -        tmem_spin_unlock(&obj->obj_spinlock);
> +        spin_unlock(&obj->obj_spinlock);
>      }
>
>  out:
> @@ -1623,9 +1593,9 @@ static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp)
>      obj = obj_find(pool,oidp);
>      if ( obj == NULL )
>          goto out;
> -    tmem_write_lock(&pool->pool_rwlock);
> +    write_lock(&pool->pool_rwlock);
>      obj_destroy(obj,0);
> -    tmem_write_unlock(&pool->pool_rwlock);
> +    write_unlock(&pool->pool_rwlock);
>
>  out:
>      if ( pool->client->frozen )
> @@ -2032,7 +2002,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
>      if ( bufsize < pagesize + sizeof(struct tmem_handle) )
>          return -ENOMEM;
>
> -    tmem_spin_lock(&pers_lists_spinlock);
> +    spin_lock(&pers_lists_spinlock);
>      if ( list_empty(&pool->persistent_page_list) )
>      {
>          ret = -1;
> @@ -2064,7 +2034,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id,
>      ret = do_tmem_get(pool, &oid, pgp->index, 0, buf);
>
>  out:
> -    tmem_spin_unlock(&pers_lists_spinlock);
> +    spin_unlock(&pers_lists_spinlock);
>      return ret;
>  }
>
> @@ -2080,7 +2050,7 @@ static int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf,
>          return 0;
>      if ( bufsize < sizeof(struct tmem_handle) )
>          return 0;
> -    tmem_spin_lock(&pers_lists_spinlock);
> +    spin_lock(&pers_lists_spinlock);
>      if ( list_empty(&client->persistent_invalidated_list) )
>          goto out;
>      if ( client->cur_pgp == NULL )
> @@ -2106,7 +2076,7 @@ static int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf,
>      tmem_copy_to_client_buf(buf, &h, 1);
>      ret = 1;
>  out:
> -    tmem_spin_unlock(&pers_lists_spinlock);
> +    spin_unlock(&pers_lists_spinlock);
>      return ret;
>  }
>
> @@ -2222,7 +2192,7 @@ long do_tmem_op(tmem_cli_op_t uops)
>      struct tmem_pool *pool = NULL;
>      struct oid *oidp;
>      int rc = 0;
> -    bool_t tmem_write_lock_set = 0, tmem_read_lock_set = 0;
> +    bool_t write_lock_set = 0, read_lock_set = 0;
>
>      if ( !tmem_initialized )
>          return -ENODEV;
> @@ -2230,47 +2200,30 @@ long do_tmem_op(tmem_cli_op_t uops)
>      if ( !tmem_current_permitted() )
>          return -EPERM;
>
> -    if ( tmem_lock_all )
> -    {
> -        if ( tmem_lock_all > 1 )
> -            spin_lock_irq(&tmem_spinlock);
> -        else
> -            spin_lock(&tmem_spinlock);
> -    }
> -
>      if ( client != NULL && tmem_client_is_dying(client) )
> -    {
> -        rc = -ENODEV;
> -        if ( tmem_lock_all )
> -            goto out;
> -   simple_error:
> -        return rc;
> -    }
> +        return -ENODEV;
>
>      if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) )
>      {
>          tmem_client_err("tmem: can't get tmem struct from %s\n",
>                          tmem_client_str);
> -        rc = -EFAULT;
> -        if ( !tmem_lock_all )
> -            goto simple_error;
> -        goto out;
> +        return -EFAULT;
>      }
>
>      if ( op.cmd == TMEM_CONTROL )
>      {
> -        tmem_write_lock(&tmem_rwlock);
> -        tmem_write_lock_set = 1;
> +        write_lock(&tmem_rwlock);
> +        write_lock_set = 1;
>          rc = do_tmem_control(&op);
>          goto out;
>      } else if ( op.cmd == TMEM_AUTH ) {
> -        tmem_write_lock(&tmem_rwlock);
> -        tmem_write_lock_set = 1;
> +        write_lock(&tmem_rwlock);
> +        write_lock_set = 1;
>          rc = tmemc_shared_pool_auth(op.u.creat.arg1,op.u.creat.uuid[0],
>                           op.u.creat.uuid[1],op.u.creat.flags);
>          goto out;
>      } else if ( op.cmd == TMEM_RESTORE_NEW ) {
> -        tmem_write_lock(&tmem_rwlock);
> -        tmem_write_lock_set = 1;
> +        write_lock(&tmem_rwlock);
> +        write_lock_set = 1;
>          rc = do_tmem_new_pool(op.u.creat.arg1, op.pool_id, op.u.creat.flags,
>                           op.u.creat.uuid[0], op.u.creat.uuid[1]);
>          goto out;
> @@ -2279,8 +2232,8 @@ long do_tmem_op(tmem_cli_op_t uops)
>      /* create per-client tmem structure dynamically on first use by client */
>      if ( client == NULL )
>      {
> -        tmem_write_lock(&tmem_rwlock);
> -        tmem_write_lock_set = 1;
> +        write_lock(&tmem_rwlock);
> +        write_lock_set = 1;
>          if ( (client = client_create(tmem_get_cli_id_from_current())) == NULL )
>          {
>              tmem_client_err("tmem: can't create tmem structure for %s\n",
> @@ -2292,18 +2245,18 @@ long do_tmem_op(tmem_cli_op_t uops)
>
>      if ( op.cmd == TMEM_NEW_POOL || op.cmd == TMEM_DESTROY_POOL )
>      {
> -        if ( !tmem_write_lock_set )
> +        if ( !write_lock_set )
>          {
> -            tmem_write_lock(&tmem_rwlock);
> -            tmem_write_lock_set = 1;
> +            write_lock(&tmem_rwlock);
> +            write_lock_set = 1;
>          }
>      }
>      else
>      {
> -        if ( !tmem_write_lock_set )
> +        if ( !write_lock_set )
>          {
> -            tmem_read_lock(&tmem_rwlock);
> -            tmem_read_lock_set = 1;
> +            read_lock(&tmem_rwlock);
> +            read_lock_set = 1;
>          }
>          if ( ((uint32_t)op.pool_id >= MAX_POOLS_PER_DOMAIN) ||
>               ((pool = client->pools[op.pool_id]) == NULL) )
> @@ -2346,21 +2299,12 @@ long do_tmem_op(tmem_cli_op_t uops)
>      }
>
>  out:
> -    if ( tmem_lock_all )
> -    {
> -        if ( tmem_lock_all > 1 )
> -            spin_unlock_irq(&tmem_spinlock);
> -        else
> -            spin_unlock(&tmem_spinlock);
> -    } else {
> -        if ( tmem_write_lock_set )
> -            write_unlock(&tmem_rwlock);
> -        else if ( tmem_read_lock_set )
> -            read_unlock(&tmem_rwlock);
> -        else
> -            ASSERT(0);
> -    }
> -
> +    if ( write_lock_set )
> +        write_unlock(&tmem_rwlock);
> +    else if ( read_lock_set )
> +        read_unlock(&tmem_rwlock);
> +    else
> +        ASSERT(0);
>      return rc;
>  }
>
> @@ -2378,38 +2322,26 @@ void tmem_destroy(void *v)
>          return;
>      }
>
> -    if ( tmem_lock_all )
> -        spin_lock(&tmem_spinlock);
> -    else
> -        write_lock(&tmem_rwlock);
> +    write_lock(&tmem_rwlock);
>
>      printk("tmem: flushing tmem pools for %s=%d\n",
>             tmem_cli_id_str, client->cli_id);
>      client_flush(client, 1);
>
> -    if ( tmem_lock_all )
> -        spin_unlock(&tmem_spinlock);
> -    else
> -        write_unlock(&tmem_rwlock);
> +    write_unlock(&tmem_rwlock);
>  }
>
>  /* freezing all pools guarantees that no additional memory will be consumed */
>  void tmem_freeze_all(unsigned char key)
>  {
>      static int freeze = 0;
> -
> -    if ( tmem_lock_all )
> -        spin_lock(&tmem_spinlock);
> -    else
> -        write_lock(&tmem_rwlock);
> +
> +    write_lock(&tmem_rwlock);
>
>      freeze = !freeze;
>      tmemc_freeze_pools(TMEM_CLI_ID_NULL,freeze);
>
> -    if ( tmem_lock_all )
> -        spin_unlock(&tmem_spinlock);
> -    else
> -        write_unlock(&tmem_rwlock);
> +    write_unlock(&tmem_rwlock);
>  }
>
>  #define MAX_EVICTS 10  /* should be variable or set via TMEMC_ ?? */
> @@ -2431,12 +2363,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
>      }
>
>      if ( tmem_called_from_tmem(memflags) )
> -    {
> -        if ( tmem_lock_all )
> -            spin_lock(&tmem_spinlock);
> -        else
> -            read_lock(&tmem_rwlock);
> -    }
> +        read_lock(&tmem_rwlock);
>
>      while ( (pfp = tmem_alloc_page(NULL,1)) == NULL )
>      {
> @@ -2451,12 +2378,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
>      }
>
>      if ( tmem_called_from_tmem(memflags) )
> -    {
> -        if ( tmem_lock_all )
> -            spin_unlock(&tmem_spinlock);
> -        else
> -            read_unlock(&tmem_rwlock);
> -    }
> +        read_unlock(&tmem_rwlock);
>
>      return pfp;
>  }
> @@ -2482,9 +2404,8 @@ static int __init init_tmem(void)
>
>      if ( tmem_init() )
>      {
> -        printk("tmem: initialized comp=%d dedup=%d tze=%d global-lock=%d\n",
> -            tmem_compression_enabled(), tmem_dedup_enabled(), tmem_tze_enabled(),
> -            tmem_lock_all);
> +        printk("tmem: initialized comp=%d dedup=%d tze=%d\n",
> +            tmem_compression_enabled(), tmem_dedup_enabled(), tmem_tze_enabled());
>          if ( tmem_dedup_enabled()&&tmem_compression_enabled()&&tmem_tze_enabled() )
>          {
>              tmem_tze_disable();
> diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
> index fbd1acc..bc8e249 100644
> --- a/xen/common/tmem_xen.c
> +++ b/xen/common/tmem_xen.c
> @@ -29,9 +29,6 @@ boolean_param("tmem_tze", opt_tmem_tze);
>  bool_t __read_mostly opt_tmem_shared_auth = 0;
>  boolean_param("tmem_shared_auth", opt_tmem_shared_auth);
>
> -int __read_mostly opt_tmem_lock = 0;
> -integer_param("tmem_lock", opt_tmem_lock);
> -
>  atomic_t freeable_page_count = ATOMIC_INIT(0);
>
>  /* these are a concurrency bottleneck, could be percpu and dynamically
> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
> index 3777543..7612a3f 100644
> --- a/xen/include/xen/tmem_xen.h
> +++ b/xen/include/xen/tmem_xen.h
> @@ -34,11 +34,6 @@ extern spinlock_t tmem_page_list_lock;
>  extern unsigned long tmem_page_list_pages;
>  extern atomic_t freeable_page_count;
>
> -extern spinlock_t tmem_lock;
> -extern spinlock_t tmem_spinlock;
> -extern rwlock_t tmem_rwlock;
> -
> -extern void tmem_copy_page(char *to, char*from);
>  extern int tmem_init(void);
>  #define tmem_hash hash_long
>
> @@ -77,8 +72,6 @@ static inline bool_t tmem_enabled(void)
>      return opt_tmem;
>  }
>
> -extern int opt_tmem_lock;
> -
>  /*
>   * Memory free page list management
>   */
> @@ -182,7 +175,6 @@ static inline unsigned long tmem_free_mb(void)
>      return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT);
>  }
>
> -#define tmem_lock_all  opt_tmem_lock
>  #define tmem_called_from_tmem(_memflags) (_memflags & MEMF_tmem)
>
>  /* "Client" (==domain) abstraction */
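One note on the commit message's point that no_evict is meaningless
without tmem_lock_all: with the global lock gone, the only thing
pinning an object against concurrent eviction is its obj_spinlock, so
lookups have to trylock and retry rather than block (this is the
obj_find() change in the hunk above). A condensed, illustrative sketch
of that idiom, where rb_tree_search() is a hypothetical stand-in for
the rbtree walk:

    /* Sketch only: return the object locked, or NULL if not found. */
    static struct tmem_object_root *lookup(struct tmem_pool *pool,
                                           struct oid *oidp)
    {
        struct tmem_object_root *obj;

    restart:
        read_lock(&pool->pool_rwlock);
        obj = rb_tree_search(pool, oidp);   /* hypothetical helper */
        if ( obj != NULL && !spin_trylock(&obj->obj_spinlock) )
        {
            /* The evictor takes obj_spinlock first and may then want
             * pool_rwlock; blocking here while holding the tree lock
             * would invert that order, so drop the lock and retry. */
            read_unlock(&pool->pool_rwlock);
            goto restart;
        }
        read_unlock(&pool->pool_rwlock);
        return obj;
    }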
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel