[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 12/16] tmem: cleanup: refactor the alloc/free path
There are two allocation paths for each persistent and ephemeral pool. This patch tries to refactor those allocate/free functions with better names and a more readable call layer. Also added more comments. Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx> --- xen/common/tmem.c | 112 +++++++++++++++++++++++++++++++++++++------- xen/common/tmem_xen.c | 63 ------------------------- xen/include/xen/tmem_xen.h | 20 ++------ 3 files changed, 100 insertions(+), 95 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index 67fa1ee..ee61899 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -166,6 +166,13 @@ static bool_t global_shared_auth = 0; static atomic_t client_weight_total = ATOMIC_INIT(0); static int tmem_initialized = 0; +struct xmem_pool *tmem_mempool = 0; +unsigned int tmem_mempool_maxalloc = 0; + +DEFINE_SPINLOCK(tmem_page_list_lock); +PAGE_LIST_HEAD(tmem_page_list); +unsigned long tmem_page_list_pages = 0; + /************ CONCURRENCY ***********************************************/ DEFINE_RWLOCK(tmem_rwlock); static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */ @@ -177,7 +184,29 @@ static DEFINE_SPINLOCK(pers_lists_spinlock); /* global counters (should use long_atomic_t access) */ static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */ -/************ MEMORY ALLOCATION INTERFACE *****************************/ +/* + * There are two types of memory allocation interfaces in tmem. + * One is based on xmem_pool and the other is used to allocate a whole page. + * Both of them are based on the low-level functions __tmem_alloc_page/_thispool(). + * The call trace of the alloc path is as below. 
+ * Persistent pool: + * 1.tmem_malloc() + * > xmem_pool_alloc() + * > tmem_persistent_pool_page_get() + * > __tmem_alloc_page_thispool() + * 2.tmem_alloc_page() + * > __tmem_alloc_page_thispool() + * + * Ephemeral pool: + * 1.tmem_malloc() + * > xmem_pool_alloc() + * > tmem_mempool_page_get() + * > __tmem_alloc_page() + * 2.tmem_alloc_page() + * > __tmem_alloc_page() + * + * The free path is done in the same manner. + */ static void *tmem_malloc(size_t size, struct tmem_pool *pool) { void *v = NULL; @@ -209,28 +238,76 @@ static void tmem_free(void *p, struct tmem_pool *pool) } } -static struct page_info *tmem_page_alloc(struct tmem_pool *pool) +static struct page_info *tmem_alloc_page(struct tmem_pool *pool) { struct page_info *pfp = NULL; if ( pool != NULL && is_persistent(pool) ) - pfp = tmem_alloc_page_thispool(pool->client->domain); + pfp = __tmem_alloc_page_thispool(pool->client->domain); else - pfp = tmem_alloc_page(pool,0); + pfp = __tmem_alloc_page(pool,0); return pfp; } -static void tmem_page_free(struct tmem_pool *pool, struct page_info *pfp) +static void tmem_free_page(struct tmem_pool *pool, struct page_info *pfp) { ASSERT(pfp); if ( pool == NULL || !is_persistent(pool) ) - tmem_free_page(pfp); + __tmem_free_page(pfp); else - tmem_free_page_thispool(pfp); + __tmem_free_page_thispool(pfp); } -/************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/ +static noinline void *tmem_mempool_page_get(unsigned long size) +{ + struct page_info *pi; + ASSERT(size == PAGE_SIZE); + if ( (pi = __tmem_alloc_page(NULL,0)) == NULL ) + return NULL; + ASSERT(IS_VALID_PAGE(pi)); + return page_to_virt(pi); +} + +static void tmem_mempool_page_put(void *page_va) +{ + ASSERT(IS_PAGE_ALIGNED(page_va)); + __tmem_free_page(virt_to_page(page_va)); +} + +static int __init tmem_mempool_init(void) +{ + tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get, + tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE); + if ( tmem_mempool ) + tmem_mempool_maxalloc = 
xmem_pool_maxalloc(tmem_mempool); + return tmem_mempool != NULL; +} + +/* persistent pools are per-domain */ +static void *tmem_persistent_pool_page_get(unsigned long size) +{ + struct page_info *pi; + struct domain *d = current->domain; + + ASSERT(size == PAGE_SIZE); + if ( (pi = __tmem_alloc_page_thispool(d)) == NULL ) + return NULL; + ASSERT(IS_VALID_PAGE(pi)); + return page_to_virt(pi); +} + +static void tmem_persistent_pool_page_put(void *page_va) +{ + struct page_info *pi; + + ASSERT(IS_PAGE_ALIGNED(page_va)); + pi = mfn_to_page(virt_to_mfn(page_va)); + ASSERT(IS_VALID_PAGE(pi)); + __tmem_free_page_thispool(pi); +} + +/************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/ #define NOT_SHAREABLE ((uint16_t)-1UL) static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp) @@ -305,7 +382,7 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool tmem_free(pcd_tze, pool); } else { /* real physical page */ - tmem_page_free(pool,pfp); + tmem_free_page(pool,pfp); } write_unlock(&pcd_tree_rwlocks[firstbyte]); } @@ -384,7 +461,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize /* match! if not compressed, free the no-longer-needed page */ /* but if compressed, data is assumed static so don't free! 
*/ if ( cdata == NULL ) - tmem_page_free(pgp->us.obj->pool,pgp->pfp); + tmem_free_page(pgp->us.obj->pool,pgp->pfp); goto match; } } @@ -417,7 +494,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize ((pcd->tze = tmem_malloc(pfp_size,pgp->us.obj->pool)) != NULL) ) { tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size); pcd->size = pfp_size; - tmem_page_free(pgp->us.obj->pool,pgp->pfp); + tmem_free_page(pgp->us.obj->pool,pgp->pfp); } else { pcd->pfp = pgp->pfp; pcd->size = PAGE_SIZE; @@ -486,7 +563,7 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po else if ( pgp_size ) tmem_free(pgp->cdata, pool); else - tmem_page_free(pgp->us.obj->pool,pgp->pfp); + tmem_free_page(pgp->us.obj->pool,pgp->pfp); pgp->pfp = NULL; pgp->size = -1; } @@ -1258,7 +1335,7 @@ static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn ret = tmem_compress_from_client(cmfn, &dst, &size, clibuf); if ( ret <= 0 ) goto out; - else if ( (size == 0) || (size >= tmem_subpage_maxsize()) ) { + else if ( (size == 0) || (size >= tmem_mempool_maxalloc) ) { ret = 0; goto out; } else if ( tmem_dedup_enabled() && !is_persistent(pgp->us.obj->pool) ) { @@ -1315,7 +1392,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, copy_uncompressed: if ( pgp->pfp ) pgp_free_data(pgp, pool); - if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL ) + if ( ( pgp->pfp = tmem_alloc_page(pool) ) == NULL ) goto failed_dup; pgp->size = 0; ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null); @@ -1427,7 +1504,7 @@ static int do_tmem_put(struct tmem_pool *pool, } copy_uncompressed: - if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL ) + if ( ( pgp->pfp = tmem_alloc_page(pool) ) == NULL ) { ret = -ENOMEM; goto del_pgp_from_obj; @@ -2390,7 +2467,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) if ( tmem_called_from_tmem(memflags) ) read_lock(&tmem_rwlock); - while ( (pfp = 
tmem_alloc_page(NULL,1)) == NULL ) + while ( (pfp = __tmem_alloc_page(NULL,1)) == NULL ) { if ( (max_evictions-- <= 0) || !tmem_evict()) break; @@ -2427,6 +2504,9 @@ static int __init init_tmem(void) rwlock_init(&pcd_tree_rwlocks[i]); } + if ( !tmem_mempool_init() ) + return 0; + if ( tmem_init() ) { printk("tmem: initialized comp=%d dedup=%d tze=%d\n", diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index bc8e249..5ef131b 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -238,67 +238,7 @@ int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, return 1; } -/****************** XEN-SPECIFIC MEMORY ALLOCATION ********************/ - -struct xmem_pool *tmem_mempool = 0; -unsigned int tmem_mempool_maxalloc = 0; - -DEFINE_SPINLOCK(tmem_page_list_lock); -PAGE_LIST_HEAD(tmem_page_list); -unsigned long tmem_page_list_pages = 0; - -static noinline void *tmem_mempool_page_get(unsigned long size) -{ - struct page_info *pi; - - ASSERT(size == PAGE_SIZE); - if ( (pi = tmem_alloc_page(NULL,0)) == NULL ) - return NULL; - ASSERT(IS_VALID_PAGE(pi)); - return page_to_virt(pi); -} - -static void tmem_mempool_page_put(void *page_va) -{ - ASSERT(IS_PAGE_ALIGNED(page_va)); - tmem_free_page(virt_to_page(page_va)); -} - -static int __init tmem_mempool_init(void) -{ - tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get, - tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE); - if ( tmem_mempool ) - tmem_mempool_maxalloc = xmem_pool_maxalloc(tmem_mempool); - return tmem_mempool != NULL; -} - -/* persistent pools are per-domain */ - -void *tmem_persistent_pool_page_get(unsigned long size) -{ - struct page_info *pi; - struct domain *d = current->domain; - - ASSERT(size == PAGE_SIZE); - if ( (pi = tmem_alloc_page_thispool(d)) == NULL ) - return NULL; - ASSERT(IS_VALID_PAGE(pi)); - return page_to_virt(pi); -} - -void tmem_persistent_pool_page_put(void *page_va) -{ - struct page_info *pi; - - ASSERT(IS_PAGE_ALIGNED(page_va)); - pi = 
mfn_to_page(virt_to_mfn(page_va)); - ASSERT(IS_VALID_PAGE(pi)); - tmem_free_page_thispool(pi); -} - /****************** XEN-SPECIFIC HOST INITIALIZATION ********************/ - static int dstmem_order, workmem_order; static int cpu_callback( @@ -351,9 +291,6 @@ int __init tmem_init(void) { unsigned int cpu; - if ( !tmem_mempool_init() ) - return 0; - dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES); workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS); diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index 9907575..7468c28 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -27,8 +27,6 @@ typedef uint32_t pagesize_t; /* like size_t, must handle largest PAGE_SIZE */ ((void *)((((unsigned long)addr + (PAGE_SIZE - 1)) & PAGE_MASK)) == addr) #define IS_VALID_PAGE(_pi) ( mfn_valid(page_to_mfn(_pi)) ) -extern struct xmem_pool *tmem_mempool; -extern unsigned int tmem_mempool_maxalloc; extern struct page_list_head tmem_page_list; extern spinlock_t tmem_page_list_lock; extern unsigned long tmem_page_list_pages; @@ -100,7 +98,7 @@ static inline void tmem_page_list_put(struct page_info *pi) /* * Memory allocation for persistent data */ -static inline struct page_info *tmem_alloc_page_thispool(struct domain *d) +static inline struct page_info *__tmem_alloc_page_thispool(struct domain *d) { struct page_info *pi; @@ -128,7 +126,7 @@ out: return pi; } -static inline void tmem_free_page_thispool(struct page_info *pi) +static inline void __tmem_free_page_thispool(struct page_info *pi) { struct domain *d = page_get_owner(pi); @@ -146,7 +144,7 @@ static inline void tmem_free_page_thispool(struct page_info *pi) /* * Memory allocation for ephemeral (non-persistent) data */ -static inline struct page_info *tmem_alloc_page(void *pool, int no_heap) +static inline struct page_info *__tmem_alloc_page(void *pool, int no_heap) { struct page_info *pi = tmem_page_list_get(); @@ -158,18 +156,13 @@ static inline struct page_info 
*tmem_alloc_page(void *pool, int no_heap) return pi; } -static inline void tmem_free_page(struct page_info *pi) +static inline void __tmem_free_page(struct page_info *pi) { ASSERT(IS_VALID_PAGE(pi)); tmem_page_list_put(pi); atomic_dec(&freeable_page_count); } -static inline unsigned int tmem_subpage_maxsize(void) -{ - return tmem_mempool_maxalloc; -} - static inline unsigned long tmem_free_mb(void) { return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT); @@ -373,17 +366,12 @@ static inline void tmem_copy_to_client_buf_offset(tmem_cli_va_param_t clibuf, int tmem_decompress_to_client(xen_pfn_t, void *, size_t, tmem_cli_va_param_t); - int tmem_compress_from_client(xen_pfn_t, void **, size_t *, tmem_cli_va_param_t); int tmem_copy_from_client(struct page_info *, xen_pfn_t, tmem_cli_va_param_t); - int tmem_copy_to_client(xen_pfn_t, struct page_info *, tmem_cli_va_param_t); - extern int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len); -extern void *tmem_persistent_pool_page_get(unsigned long size); -extern void tmem_persistent_pool_page_put(void *page_va); #define tmem_client_err(fmt, args...) printk(XENLOG_G_ERR fmt, ##args) #define tmem_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args) -- 1.7.10.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |