|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 13/16] tmem: cleanup: __tmem_alloc_page: drop unneed parameters
On 11/23/2013 02:17 AM, Konrad Rzeszutek Wilk wrote:
> On Wed, Nov 20, 2013 at 04:46:22PM +0800, Bob Liu wrote:
>> The two parameters of __tmem_alloc_page() can be reduced.
>
> That is dangerous. The last one (no_heap) was added to guard
> against recursion. Check how tmem_alloc_page_thispool calls
> alloc_domheap_pages, which calls alloc_heap_pages which
> can call tmem_relinquish_pages, which can .. well, you will
> get the idea.
>
>> tmem_called_from_tmem() was also dropped by this patch.
>>
>> Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
>> ---
>> xen/common/tmem.c | 13 +++++--------
>> xen/include/xen/tmem_xen.h | 11 +++++------
>> 2 files changed, 10 insertions(+), 14 deletions(-)
>>
>> diff --git a/xen/common/tmem.c b/xen/common/tmem.c
>> index ee61899..bf0fa1b 100644
>> --- a/xen/common/tmem.c
>> +++ b/xen/common/tmem.c
>> @@ -245,7 +245,7 @@ static struct page_info *tmem_alloc_page(struct
>> tmem_pool *pool)
>> if ( pool != NULL && is_persistent(pool) )
>> pfp = __tmem_alloc_page_thispool(pool->client->domain);
>> else
>> - pfp = __tmem_alloc_page(pool,0);
>> + pfp = __tmem_alloc_page();
>> return pfp;
>> }
>>
>> @@ -263,9 +263,8 @@ static noinline void *tmem_mempool_page_get(unsigned
>> long size)
>> struct page_info *pi;
>>
>> ASSERT(size == PAGE_SIZE);
>> - if ( (pi = __tmem_alloc_page(NULL,0)) == NULL )
>> + if ( (pi = __tmem_alloc_page()) == NULL )
>> return NULL;
>> - ASSERT(IS_VALID_PAGE(pi));
>> return page_to_virt(pi);
>> }
>>
>> @@ -2450,7 +2449,6 @@ void tmem_freeze_all(unsigned char key)
>> void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
>> {
>> struct page_info *pfp;
>> - unsigned long evicts_per_relinq = 0;
>> int max_evictions = 10;
>>
>> if (!tmem_enabled() || !tmem_freeable_pages())
>> @@ -2464,14 +2462,13 @@ void *tmem_relinquish_pages(unsigned int order,
>> unsigned int memflags)
>> return NULL;
>> }
>>
>> - if ( tmem_called_from_tmem(memflags) )
>> + if ( memflags & MEMF_tmem )
>> read_lock(&tmem_rwlock);
>>
>> - while ( (pfp = __tmem_alloc_page(NULL,1)) == NULL )
>> + while ( (pfp = tmem_page_list_get()) == NULL )
>
> No no. That could lead to bad recursion. You need to pass '1' somehow.
>
I don't think so.
If we pass '1' to __tmem_alloc_page(), then __tmem_alloc_page() only calls
tmem_page_list_get() and returns.
There is no difference in behavior, so the extra parameter is unneeded.
>
>> {
>> if ( (max_evictions-- <= 0) || !tmem_evict())
>> break;
>> - evicts_per_relinq++;
>
>
> ? Should this be a separate patch?
>
>> }
>> if ( pfp != NULL )
>> {
>> @@ -2479,7 +2476,7 @@ void *tmem_relinquish_pages(unsigned int order,
>> unsigned int memflags)
>> scrub_one_page(pfp);
>> }
>>
>> - if ( tmem_called_from_tmem(memflags) )
>> + if ( memflags & MEMF_tmem )
>
> Perhaps a comment too? /* Called recursively from within tmem. */
>
>> read_unlock(&tmem_rwlock);
>>
>> return pfp;
>> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
>> index 7468c28..c4b9f5a 100644
>> --- a/xen/include/xen/tmem_xen.h
>> +++ b/xen/include/xen/tmem_xen.h
>> @@ -144,15 +144,16 @@ static inline void __tmem_free_page_thispool(struct
>> page_info *pi)
>> /*
>> * Memory allocation for ephemeral (non-persistent) data
>> */
>> -static inline struct page_info *__tmem_alloc_page(void *pool, int no_heap)
>> +static inline struct page_info *__tmem_alloc_page(void)
>> {
>> struct page_info *pi = tmem_page_list_get();
>>
>> - if ( pi == NULL && !no_heap )
>> + if ( pi == NULL)
>> pi = alloc_domheap_pages(0,0,MEMF_tmem);
>> - ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
>> - if ( pi != NULL && !no_heap )
>> +
>> + if ( pi )
>> atomic_inc(&freeable_page_count);
>> + ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
>> return pi;
>> }
>>
See __tmem_alloc_page() implementation here.
--
Regards,
-Bob
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |