[Minios-devel] [UNIKRAFT PATCH v7 1/5] lib/ukalloc: cleanup page allocation interface
Remove the LIBUKALLOC_IFPAGES config option: Unikraft currently does not
compile without it, since it is required by the binary buddy allocator.
Furthermore, the LIBUKALLOC_IFPAGES requirement has propagated through the
code and is now required by uksched and plat/xen (neither of which even
selects LIBUKALLOC_IFPAGES).

Remove the uk_memalign_ifpages declaration from alloc_impl.h: this function
is not defined anywhere. Instead, there is a generic wrapper,
uk_memalign_compat, sitting on top of posix_memalign.

Remove uk_malloc_page and uk_free_page: these functions are little used,
badly named (they have, in fact, nothing to do with malloc or free, as they
rely on palloc and pfree), and their benefit to code readability is unclear.

Signed-off-by: Hugo Lefeuvre <hugo.lefeuvre@xxxxxxxxx>

diff --git a/lib/ukalloc/Config.uk b/lib/ukalloc/Config.uk
index 5878689..0de7464 100644
--- a/lib/ukalloc/Config.uk
+++ b/lib/ukalloc/Config.uk
@@ -5,12 +5,6 @@ menuconfig LIBUKALLOC
         select LIBUKDEBUG
 
 if LIBUKALLOC
-        config LIBUKALLOC_IFPAGES
-                bool "Page allocation interface"
-                default n
-                help
-                        Provide uk_palloc() and uk_pfree() calls
-
         config LIBUKALLOC_IFSTATS
                 bool "Statistics interface"
                 default n
diff --git a/lib/ukalloc/alloc.c b/lib/ukalloc/alloc.c
index 708bd02..56d6b7d 100644
--- a/lib/ukalloc/alloc.c
+++ b/lib/ukalloc/alloc.c
@@ -109,7 +109,6 @@ int uk_alloc_set_default(struct uk_alloc *a)
         return 0;
 }
 
-#if CONFIG_LIBUKALLOC_IFPAGES
 static void *uk_get_real_start(const void *ptr)
 {
         void *intptr;
@@ -264,8 +263,6 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a,
         return 0;
 }
 
-#endif
-
 void *uk_calloc_compat(struct uk_alloc *a, size_t nmemb, size_t size)
 {
         void *ptr;
diff --git a/lib/ukalloc/include/uk/alloc.h b/lib/ukalloc/include/uk/alloc.h
index 798d45a..9e5a411 100644
--- a/lib/ukalloc/include/uk/alloc.h
+++ b/lib/ukalloc/include/uk/alloc.h
@@ -68,12 +68,10 @@ typedef void* (*uk_alloc_realloc_func_t)
         (struct uk_alloc *a, void *ptr, size_t size);
 typedef void  (*uk_alloc_free_func_t)
         (struct uk_alloc *a, void *ptr);
-#if CONFIG_LIBUKALLOC_IFPAGES
 typedef void* (*uk_alloc_palloc_func_t)
         (struct uk_alloc *a, size_t order);
 typedef void  (*uk_alloc_pfree_func_t)
         (struct uk_alloc *a, void *ptr, size_t order);
-#endif
 typedef int   (*uk_alloc_addmem_func_t)
         (struct uk_alloc *a, void *base, size_t size);
 #if CONFIG_LIBUKALLOC_IFSTATS
@@ -90,11 +88,9 @@ struct uk_alloc {
         uk_alloc_memalign_func_t memalign;
         uk_alloc_free_func_t free;
-#if CONFIG_LIBUKALLOC_IFPAGES
         /* page allocation interface */
         uk_alloc_palloc_func_t palloc;
         uk_alloc_pfree_func_t pfree;
-#endif
 
 #if CONFIG_LIBUKALLOC_IFSTATS
         /* optional interface */
         uk_alloc_availmem_func_t availmem;
@@ -113,6 +109,7 @@ static inline void *uk_do_malloc(struct uk_alloc *a, size_t size)
         UK_ASSERT(a);
         return a->malloc(a, size);
 }
+
 static inline void *uk_malloc(struct uk_alloc *a, size_t size)
 {
         if (unlikely(!a)) {
@@ -128,6 +125,7 @@ static inline void *uk_do_calloc(struct uk_alloc *a,
         UK_ASSERT(a);
         return a->calloc(a, nmemb, size);
 }
+
 static inline void *uk_calloc(struct uk_alloc *a,
                               size_t nmemb, size_t size)
 {
@@ -144,6 +142,7 @@ static inline void *uk_do_realloc(struct uk_alloc *a,
         UK_ASSERT(a);
         return a->realloc(a, ptr, size);
 }
+
 static inline void *uk_realloc(struct uk_alloc *a, void *ptr, size_t size)
 {
         if (unlikely(!a)) {
@@ -159,6 +158,7 @@ static inline int uk_do_posix_memalign(struct uk_alloc *a, void **memptr,
         UK_ASSERT(a);
         return a->posix_memalign(a, memptr, align, size);
 }
+
 static inline int uk_posix_memalign(struct uk_alloc *a, void **memptr,
                                     size_t align, size_t size)
 {
@@ -175,6 +175,7 @@ static inline void *uk_do_memalign(struct uk_alloc *a,
         UK_ASSERT(a);
         return a->memalign(a, align, size);
 }
+
 static inline void *uk_memalign(struct uk_alloc *a,
                                 size_t align, size_t size)
 {
@@ -188,41 +189,36 @@ static inline void uk_do_free(struct uk_alloc *a, void *ptr)
         UK_ASSERT(a);
         a->free(a, ptr);
 }
+
 static inline void uk_free(struct uk_alloc *a, void *ptr)
 {
         uk_do_free(a, ptr);
 }
 
-#if CONFIG_LIBUKALLOC_IFPAGES
 static inline void *uk_do_palloc(struct uk_alloc *a, size_t order)
 {
         UK_ASSERT(a);
         return a->palloc(a, order);
 }
+
 static inline void *uk_palloc(struct uk_alloc *a, size_t order)
 {
         if (unlikely(!a || !a->palloc))
                 return NULL;
-        return a->palloc(a, order);
-}
-static inline void *uk_malloc_page(struct uk_alloc *a)
-{
-        return uk_palloc(a, 0);
+        return uk_do_palloc(a, order);
 }
+
 static inline void uk_do_pfree(struct uk_alloc *a, void *ptr, size_t order)
 {
         UK_ASSERT(a);
         a->pfree(a, ptr, order);
 }
+
 static inline void uk_pfree(struct uk_alloc *a, void *ptr, size_t order)
 {
         uk_do_pfree(a, ptr, order);
 }
-static inline void uk_free_page(struct uk_alloc *a, void *ptr)
-{
-        return uk_pfree(a, ptr, 0);
-}
-#endif
+
 static inline int uk_alloc_addmem(struct uk_alloc *a, void *base,
                                   size_t size)
 {
diff --git a/lib/ukalloc/include/uk/alloc_impl.h b/lib/ukalloc/include/uk/alloc_impl.h
index 379c17b..f87feb3 100644
--- a/lib/ukalloc/include/uk/alloc_impl.h
+++ b/lib/ukalloc/include/uk/alloc_impl.h
@@ -55,21 +55,17 @@ int uk_alloc_register(struct uk_alloc *a);
  * API functionality is actually implemented.
  */
-#if CONFIG_LIBUKALLOC_IFPAGES
 /* Functions that can be used by allocators that implement palloc(), pfree() only */
 void *uk_malloc_ifpages(struct uk_alloc *a, size_t size);
 void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size);
 int uk_posix_memalign_ifpages(struct uk_alloc *a, void **memptr,
                               size_t align, size_t size);
-void *uk_memalign_ifpages(struct uk_alloc *a, size_t align, size_t size);
 void uk_free_ifpages(struct uk_alloc *a, void *ptr);
-#endif /* CONFIG_LIBUKALLOC_IFPAGES */
 
 /* Functionality that is provided based on malloc() */
 void *uk_calloc_compat(struct uk_alloc *a, size_t num, size_t len);
 void *uk_memalign_compat(struct uk_alloc *a, size_t align, size_t len);
 
-#if CONFIG_LIBUKALLOC_IFPAGES
 /* Shortcut for doing a registration of an allocator that only
  * implements palloc(), pfree(), addmem()
  */
 #define uk_alloc_init_palloc(a, palloc_func, pfree_func, addmem_func)  \
@@ -86,7 +82,6 @@ void *uk_memalign_compat(struct uk_alloc *a, size_t align, size_t len);
                                                                         \
                 uk_alloc_register((a));                                 \
         } while (0)
-#endif /* CONFIG_LIBUKALLOC_IFPAGES */
 
 #ifdef __cplusplus
 }
diff --git a/lib/ukallocbbuddy/Config.uk b/lib/ukallocbbuddy/Config.uk
index e3e74bc..c5a3025 100644
--- a/lib/ukallocbbuddy/Config.uk
+++ b/lib/ukallocbbuddy/Config.uk
@@ -4,4 +4,3 @@ config LIBUKALLOCBBUDDY
         select LIBNOLIBC if !HAVE_LIBC
         select LIBUKDEBUG
         select LIBUKALLOC
-        select LIBUKALLOC_IFPAGES
diff --git a/plat/xen/drivers/blk/blkfront.c b/plat/xen/drivers/blk/blkfront.c
index d543c4b..cd5297e 100644
--- a/plat/xen/drivers/blk/blkfront.c
+++ b/plat/xen/drivers/blk/blkfront.c
@@ -562,7 +562,7 @@ static int blkfront_ring_init(struct uk_blkdev_queue *queue)
         UK_ASSERT(queue);
         dev = queue->dev;
 
-        sring = uk_malloc_page(queue->a);
+        sring = uk_palloc(queue->a, 0);
         if (!sring)
                 return -ENOMEM;
 
@@ -587,7 +587,7 @@ static void blkfront_ring_fini(struct uk_blkdev_queue *queue)
         }
 
         if (queue->ring.sring != NULL)
-                uk_free_page(queue->a, queue->ring.sring);
+                uk_pfree(queue->a, queue->ring.sring, 0);
 }
 
 #if CONFIG_XEN_BLKFRONT_GREFPOOL
diff --git a/plat/xen/gnttab.c b/plat/xen/gnttab.c
index f48fa44..5963950 100644
--- a/plat/xen/gnttab.c
+++ b/plat/xen/gnttab.c
@@ -219,7 +219,7 @@ grant_ref_t gnttab_alloc_and_grant(void **map, struct uk_alloc *a)
         UK_ASSERT(map != NULL);
         UK_ASSERT(a != NULL);
 
-        page = uk_malloc_page(a);
+        page = uk_palloc(a, 0);
         if (page == NULL)
                 return GRANT_INVALID_REF;
 
diff --git a/plat/xen/x86/mm.c b/plat/xen/x86/mm.c
index 94f2fe2..2dd2618 100644
--- a/plat/xen/x86/mm.c
+++ b/plat/xen/x86/mm.c
@@ -281,7 +281,7 @@ static pgentry_t *need_pte(unsigned long va, struct uk_alloc *a)
 #if defined(__x86_64__)
         offset = l4_table_offset(va);
         if (!(tab[offset] & _PAGE_PRESENT)) {
-                pt_pfn = virt_to_pfn(uk_malloc_page(a));
+                pt_pfn = virt_to_pfn(uk_palloc(a, 0));
                 if (!pt_pfn)
                         return NULL;
                 new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
@@ -293,7 +293,7 @@ static pgentry_t *need_pte(unsigned long va, struct uk_alloc *a)
 #endif
         offset = l3_table_offset(va);
         if (!(tab[offset] & _PAGE_PRESENT)) {
-                pt_pfn = virt_to_pfn(uk_malloc_page(a));
+                pt_pfn = virt_to_pfn(uk_palloc(a, 0));
                 if (!pt_pfn)
                         return NULL;
                 new_pt_frame(&pt_pfn, pt_mfn, offset, L2_FRAME);
@@ -304,7 +304,7 @@ static pgentry_t *need_pte(unsigned long va, struct uk_alloc *a)
         tab = mfn_to_virt(pt_mfn);
         offset = l2_table_offset(va);
         if (!(tab[offset] & _PAGE_PRESENT)) {
-                pt_pfn = virt_to_pfn(uk_malloc_page(a));
+                pt_pfn = virt_to_pfn(uk_palloc(a, 0));
                 if (!pt_pfn)
                         return NULL;
                 new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
@@ -700,10 +700,10 @@ void _arch_init_p2m(struct uk_alloc *a)
         if (((max_pfn - 1) >> L3_P2M_SHIFT) > 0)
                 UK_CRASH("Error: Too many pfns.\n");
 
-        l3_list = uk_malloc_page(a);
+        l3_list = uk_palloc(a, 0);
         for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES) {
                 if (!(pfn % (P2M_ENTRIES * P2M_ENTRIES))) {
-                        l2_list = uk_malloc_page(a);
+                        l2_list = uk_palloc(a, 0);
                         l3_list[L3_P2M_IDX(pfn)] = virt_to_mfn(l2_list);
                         l2_list_pages[L3_P2M_IDX(pfn)] = l2_list;
                 }
-- 
2.7.4
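
For context, a minimal caller-side sketch of the page interface that remains
after this change. This is only an illustration, not part of the patch: the
helper names alloc_one_page()/free_one_page() are made up, and the allocator
pointer is assumed to come from somewhere such as uk_alloc_get_default().

#include <uk/alloc.h>

/* Allocate and release a single page through the generic page interface.
 * Passing order 0 requests a single page, which is exactly what the removed
 * uk_malloc_page()/uk_free_page() wrappers passed to uk_palloc()/uk_pfree().
 */
static void *alloc_one_page(struct uk_alloc *a)
{
        /* previously: return uk_malloc_page(a); */
        return uk_palloc(a, 0);
}

static void free_one_page(struct uk_alloc *a, void *page)
{
        /* previously: uk_free_page(a, page); */
        uk_pfree(a, page, 0);
}

Since uk_palloc() returns NULL when the allocator does not implement
palloc(), converted callers keep the same NULL checks they already had
around uk_malloc_page().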