[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [UNIKRAFT PATCH 17/18] lib/ukalloc: Use Unikraft internal types
I have just one inline comment for this patch:
Use Unikraft-internal types for the API headers in order to have minimal
dependency to libc definitions. This is done so that libcs can integrate
`<uk/alloc.h>` directly with their header files without breaking the
declaration set of libc headers.
This is done towards receiving per-library statistics
(see the following commit).
Signed-off-by: Simon Kuenzer <simon.kuenzer@xxxxxxxxx>
---
lib/ukalloc/alloc.c | 140 ++++++++++++++--------------
lib/ukalloc/include/uk/alloc.h | 88 +++++++++--------
lib/ukalloc/include/uk/alloc_impl.h | 26 +++---
lib/ukalloc/libstats.c | 75 +++++++--------
lib/ukallocregion/region.c | 3 +
plat/common/include/pci/pci_bus.h | 2 +
plat/common/memory.c | 2 +
plat/xen/gnttab.c | 1 +
plat/xen/memory.c | 2 +
plat/xen/x86/gnttab.c | 1 +
10 files changed, 175 insertions(+), 165 deletions(-)
diff --git a/lib/ukalloc/alloc.c b/lib/ukalloc/alloc.c
index 1bf596b0..8401124d 100644
--- a/lib/ukalloc/alloc.c
+++ b/lib/ukalloc/alloc.c
@@ -53,14 +53,14 @@ int uk_alloc_register(struct uk_alloc *a)
if (!_uk_alloc_head) {
_uk_alloc_head = a;
- a->next = NULL;
+ a->next = __NULL;
return 0;
}
while (this && this->next)
this = this->next;
this->next = a;
- a->next = NULL;
+ a->next = __NULL;
return 0;
}
@@ -81,7 +81,7 @@ UK_CTASSERT(!(sizeof(struct metadata_ifpages) > METADATA_IFPAGES_SIZE_POW2));
static struct metadata_ifpages *uk_get_metadata(const void *ptr)
{
- uintptr_t metadata;
+ __uptr metadata;
/* a ptr less or equal to page size would mean that the actual allocated
* object started at 0x0, so it was NULL.
@@ -89,11 +89,11 @@ static struct metadata_ifpages *uk_get_metadata(const void *ptr)
* also imply that the actual allocated object started at 0x0 because
* we need space to store metadata.
*/
- UK_ASSERT((uintptr_t) ptr >= __PAGE_SIZE +
+ UK_ASSERT((__uptr) ptr >= __PAGE_SIZE +
sizeof(struct metadata_ifpages));
- metadata = ALIGN_DOWN((uintptr_t) ptr, (uintptr_t) __PAGE_SIZE);
- if (metadata == (uintptr_t) ptr) {
+ metadata = ALIGN_DOWN((__uptr) ptr, (__uptr) __PAGE_SIZE);
+ if (metadata == (__uptr) ptr) {
/* special case: the memory was page-aligned.
* In this case the metadata lies at the start of the
* previous page, with the rest of that page unused.
@@ -104,12 +104,12 @@ static struct metadata_ifpages *uk_get_metadata(const void *ptr)
return (struct metadata_ifpages *) metadata;
}
-static size_t uk_getmallocsize(const void *ptr)
+static __sz uk_getmallocsize(const void *ptr)
{
struct metadata_ifpages *metadata = uk_get_metadata(ptr);
- return (size_t)metadata->base + (size_t)(metadata->num_pages) *
- __PAGE_SIZE - (size_t)ptr;
+ return (__sz)metadata->base + (__sz)(metadata->num_pages) *
+ __PAGE_SIZE - (__sz)ptr;
}
/* This is a very simple, naive implementation of malloc.
@@ -124,23 +124,23 @@ static size_t uk_getmallocsize(const void *ptr)
* locking support yet. Eventually, this should probably be replaced by
* something better.
*/
-void *uk_malloc_ifpages(struct uk_alloc *a, size_t size)
+void *uk_malloc_ifpages(struct uk_alloc *a, __sz size)
{
- uintptr_t intptr;
+ __uptr intptr;
unsigned long num_pages;
struct metadata_ifpages *metadata;
- size_t realsize = sizeof(*metadata) + size;
+ __sz realsize = sizeof(*metadata) + size;
UK_ASSERT(a);
/* check for invalid size and overflow */
if (!size || realsize < size)
- return NULL;
+ return __NULL;
num_pages = size_to_num_pages(realsize);
- intptr = (uintptr_t)uk_palloc(a, num_pages);
+ intptr = (__uptr)uk_palloc(a, num_pages);
if (!intptr)
- return NULL;
+ return __NULL;
metadata = (struct metadata_ifpages *) intptr;
metadata->num_pages = num_pages;
@@ -159,15 +159,15 @@ void uk_free_ifpages(struct uk_alloc *a, void *ptr)
metadata = uk_get_metadata(ptr);
- UK_ASSERT(metadata->base != NULL);
+ UK_ASSERT(metadata->base != __NULL);
UK_ASSERT(metadata->num_pages != 0);
uk_pfree(a, metadata->base, metadata->num_pages);
}
-void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size)
+void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, __sz size)
{
void *retptr;
- size_t mallocsize;
+ __sz mallocsize;
UK_ASSERT(a);
if (!ptr)
@@ -175,12 +175,12 @@ void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size)
if (ptr && !size) {
uk_free_ifpages(a, ptr);
- return NULL;
+ return __NULL;
}
retptr = uk_malloc_ifpages(a, size);
if (!retptr)
- return NULL;
+ return __NULL;
mallocsize = uk_getmallocsize(ptr);
@@ -194,12 +194,12 @@ void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size)
}
int uk_posix_memalign_ifpages(struct uk_alloc *a,
- void **memptr, size_t align, size_t size)
+ void **memptr, __sz align, __sz size)
{
struct metadata_ifpages *metadata;
unsigned long num_pages;
- uintptr_t intptr;
- size_t realsize, padding;
+ __uptr intptr;
+ __sz realsize, padding;
UK_ASSERT(a);
if (((align - 1) & align) != 0
@@ -252,18 +252,18 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a,
return EINVAL;
num_pages = size_to_num_pages(realsize);
- intptr = (uintptr_t) uk_palloc(a, num_pages);
+ intptr = (__uptr) uk_palloc(a, num_pages);
if (!intptr)
return ENOMEM;
*memptr = (void *) ALIGN_UP(intptr + sizeof(*metadata),
- (uintptr_t) align);
+ (__uptr) align);
metadata = uk_get_metadata(*memptr);
/* check for underflow (should not happen) */
- UK_ASSERT(intptr <= (uintptr_t) metadata);
+ UK_ASSERT(intptr <= (__uptr) metadata);
metadata->num_pages = num_pages;
metadata->base = (void *) intptr;
@@ -271,7 +271,7 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a,
return 0;
}
-ssize_t uk_alloc_maxalloc_ifpages(struct uk_alloc *a)
+__ssz uk_alloc_maxalloc_ifpages(struct uk_alloc *a)
{
long num_pages;
@@ -279,12 +279,12 @@ ssize_t uk_alloc_maxalloc_ifpages(struct uk_alloc *a)
num_pages = uk_alloc_pmaxalloc(a);
if (num_pages < 0)
- return (ssize_t) num_pages;
+ return (__ssz) num_pages;
- return ((ssize_t) num_pages) << __PAGE_SHIFT;
+ return ((__ssz) num_pages) << __PAGE_SHIFT;
}
-ssize_t uk_alloc_availmem_ifpages(struct uk_alloc *a)
+__ssz uk_alloc_availmem_ifpages(struct uk_alloc *a)
{
long num_pages;
@@ -292,15 +292,15 @@ ssize_t uk_alloc_availmem_ifpages(struct uk_alloc *a)
num_pages = uk_alloc_pavail(a);
if (num_pages < 0)
- return (ssize_t) num_pages;
+ return (__ssz) num_pages;
- return ((ssize_t) num_pages) << __PAGE_SHIFT;
+ return ((__ssz) num_pages) << __PAGE_SHIFT;
}
#if CONFIG_LIBUKALLOC_IFMALLOC
struct metadata_ifmalloc {
- size_t size;
+ __sz size;
void *base;
};
@@ -309,16 +309,16 @@ UK_CTASSERT(!(sizeof(struct metadata_ifmalloc) > METADATA_IFMALLOC_SIZE_POW2));
static struct metadata_ifmalloc *uk_get_metadata_ifmalloc(const void *ptr)
{
- return (struct metadata_ifmalloc *)((uintptr_t) ptr -
+ return (struct metadata_ifmalloc *)((__uptr) ptr -
METADATA_IFMALLOC_SIZE_POW2);
}
-static size_t uk_getmallocsize_ifmalloc(const void *ptr)
+static __sz uk_getmallocsize_ifmalloc(const void *ptr)
{
struct metadata_ifmalloc *metadata = uk_get_metadata_ifmalloc(ptr);
- return (size_t) ((uintptr_t) metadata->base + metadata->size -
- (uintptr_t) ptr);
+ return (__sz) ((__uptr) metadata->base + metadata->size -
+ (__uptr) ptr);
}
void uk_free_ifmalloc(struct uk_alloc *a, void *ptr)
@@ -334,10 +334,10 @@ void uk_free_ifmalloc(struct uk_alloc *a, void *ptr)
a->free_backend(a, metadata->base);
}
-void *uk_malloc_ifmalloc(struct uk_alloc *a, size_t size)
+void *uk_malloc_ifmalloc(struct uk_alloc *a, __sz size)
{
struct metadata_ifmalloc *metadata;
- size_t realsize = size + METADATA_IFMALLOC_SIZE_POW2;
+ __sz realsize = size + METADATA_IFMALLOC_SIZE_POW2;
void *ptr;
UK_ASSERT(a);
@@ -345,23 +345,23 @@ void *uk_malloc_ifmalloc(struct uk_alloc *a, size_t size)
/* check for overflow */
if (unlikely(realsize < size))
- return NULL;
+ return __NULL;
ptr = a->malloc_backend(a, realsize);
if (!ptr)
- return NULL;
+ return __NULL;
metadata = ptr;
metadata->size = realsize;
metadata->base = ptr;
- return (void *) ((uintptr_t) ptr + METADATA_IFMALLOC_SIZE_POW2);
+ return (void *) ((__uptr) ptr + METADATA_IFMALLOC_SIZE_POW2);
}
-void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size)
+void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, __sz size)
{
void *retptr;
- size_t mallocsize;
+ __sz mallocsize;
UK_ASSERT(a);
if (!ptr)
@@ -369,12 +369,12 @@ void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size)
if (ptr && !size) {
uk_free_ifmalloc(a, ptr);
- return NULL;
+ return __NULL;
}
retptr = uk_malloc_ifmalloc(a, size);
if (!retptr)
- return NULL;
+ return __NULL;
mallocsize = uk_getmallocsize_ifmalloc(ptr);
@@ -385,11 +385,11 @@ void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size)
}
int uk_posix_memalign_ifmalloc(struct uk_alloc *a,
- void **memptr, size_t align, size_t size)
+ void **memptr, __sz align, __sz size)
{
struct metadata_ifmalloc *metadata;
- size_t realsize, padding;
- uintptr_t intptr;
+ __sz realsize, padding;
+ __uptr intptr;
UK_ASSERT(a);
if (((align - 1) & align) != 0
@@ -417,18 +417,18 @@ int uk_posix_memalign_ifmalloc(struct uk_alloc *a,
if (unlikely(realsize < size))
return ENOMEM;
- intptr = (uintptr_t) a->malloc_backend(a, realsize);
+ intptr = (__uptr) a->malloc_backend(a, realsize);
if (!intptr)
return ENOMEM;
*memptr = (void *) ALIGN_UP(intptr + METADATA_IFMALLOC_SIZE_POW2,
- (uintptr_t) align);
+ (__uptr) align);
metadata = uk_get_metadata_ifmalloc(*memptr);
/* check for underflow */
- UK_ASSERT(intptr <= (uintptr_t) metadata);
+ UK_ASSERT(intptr <= (__uptr) metadata);
metadata->size = realsize;
metadata->base = (void *) intptr;
@@ -456,16 +456,16 @@ void *uk_palloc_compat(struct uk_alloc *a, unsigned long num_pages)
UK_ASSERT(a);
/* check for overflow */
- if (num_pages > (~(size_t)0)/__PAGE_SIZE)
- return NULL;
+ if (num_pages > (~(__sz)0)/__PAGE_SIZE)
+ return __NULL;
if (uk_posix_memalign(a, &ptr, __PAGE_SIZE, num_pages * __PAGE_SIZE))
- return NULL;
+ return __NULL;
return ptr;
}
-void *uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size)
+void *uk_realloc_compat(struct uk_alloc *a, void *ptr, __sz size)
{
void *retptr;
@@ -475,12 +475,12 @@ void *uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size)
if (ptr && !size) {
uk_free(a, ptr);
- return NULL;
+ return __NULL;
}
retptr = uk_malloc(a, size);
if (!retptr)
- return NULL;
+ return __NULL;
memcpy(retptr, ptr, size);
@@ -488,38 +488,38 @@ void *uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size)
return retptr;
}
-void *uk_calloc_compat(struct uk_alloc *a, size_t nmemb, size_t size)
+void *uk_calloc_compat(struct uk_alloc *a, __sz nmemb, __sz size)
{
void *ptr;
- size_t tlen = nmemb * size;
+ __sz tlen = nmemb * size;
/* check for overflow */
- if (nmemb > (~(size_t)0)/size)
- return NULL;
+ if (nmemb > (~(__sz)0)/size)
+ return __NULL;
UK_ASSERT(a);
ptr = uk_malloc(a, tlen);
if (!ptr)
- return NULL;
+ return __NULL;
memset(ptr, 0, tlen);
return ptr;
}
-void *uk_memalign_compat(struct uk_alloc *a, size_t align, size_t size)
+void *uk_memalign_compat(struct uk_alloc *a, __sz align, __sz size)
{
void *ptr;
UK_ASSERT(a);
if (uk_posix_memalign(a, &ptr, align, size) != 0)
- return NULL;
+ return __NULL;
return ptr;
}
long uk_alloc_pmaxalloc_compat(struct uk_alloc *a)
{
- ssize_t mem;
+ __ssz mem;
UK_ASSERT(a);
@@ -532,7 +532,7 @@ long uk_alloc_pmaxalloc_compat(struct uk_alloc *a)
long uk_alloc_pavail_compat(struct uk_alloc *a)
{
- ssize_t mem;
+ __ssz mem;
UK_ASSERT(a);
@@ -543,11 +543,11 @@ long uk_alloc_pavail_compat(struct uk_alloc *a)
return (long) (mem >> __PAGE_SHIFT);
}
-size_t uk_alloc_availmem_total(void)
+__sz uk_alloc_availmem_total(void)
{
struct uk_alloc *a;
- ssize_t availmem;
- size_t total;
+ __ssz availmem;
+ __sz total;
total = 0;
uk_alloc_foreach(a) {
diff --git a/lib/ukalloc/include/uk/alloc.h b/lib/ukalloc/include/uk/alloc.h
index 5392fcfe..5c6a1188 100644
--- a/lib/ukalloc/include/uk/alloc.h
+++ b/lib/ukalloc/include/uk/alloc.h
@@ -34,13 +34,11 @@
#ifndef __UK_ALLOC_H__
#define __UK_ALLOC_H__
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <errno.h>
+#include <uk/arch/types.h>
#include <uk/config.h>
#include <uk/assert.h>
#include <uk/essentials.h>
+#include <errno.h>
#ifdef __cplusplus
extern "C" {
@@ -49,15 +47,15 @@ extern "C" {
struct uk_alloc;
typedef void* (*uk_alloc_malloc_func_t)
- (struct uk_alloc *a, size_t size);
+ (struct uk_alloc *a, __sz size);
typedef void* (*uk_alloc_calloc_func_t)
- (struct uk_alloc *a, size_t nmemb, size_t size);
+ (struct uk_alloc *a, __sz nmemb, __sz size);
typedef int (*uk_alloc_posix_memalign_func_t)
- (struct uk_alloc *a, void **memptr, size_t align, size_t size);
+ (struct uk_alloc *a, void **memptr, __sz align, __sz size);
typedef void* (*uk_alloc_memalign_func_t)
- (struct uk_alloc *a, size_t align, size_t size);
+ (struct uk_alloc *a, __sz align, __sz size);
typedef void* (*uk_alloc_realloc_func_t)
- (struct uk_alloc *a, void *ptr, size_t size);
+ (struct uk_alloc *a, void *ptr, __sz size);
typedef void (*uk_alloc_free_func_t)
(struct uk_alloc *a, void *ptr);
typedef void* (*uk_alloc_palloc_func_t)
@@ -65,27 +63,27 @@ typedef void* (*uk_alloc_palloc_func_t)
typedef void (*uk_alloc_pfree_func_t)
(struct uk_alloc *a, void *ptr, unsigned long num_pages);
typedef int (*uk_alloc_addmem_func_t)
- (struct uk_alloc *a, void *base, size_t size);
-typedef ssize_t (*uk_alloc_availmem_func_t)
+ (struct uk_alloc *a, void *base, __sz size);
+typedef __ssz (*uk_alloc_availmem_func_t)
(struct uk_alloc *a);
typedef long (*uk_alloc_pavail_func_t)
(struct uk_alloc *a);
#if CONFIG_LIBUKALLOC_IFSTATS
struct uk_alloc_stats {
- size_t last_alloc_size; /* size of the last allocation */
- size_t max_alloc_size; /* biggest satisfied allocation size */
- size_t min_alloc_size; /* smallest satisfied allocation size */
+ __sz last_alloc_size; /* size of the last allocation */
+ __sz max_alloc_size; /* biggest satisfied allocation size */
+ __sz min_alloc_size; /* smallest satisfied allocation size */
- uint64_t tot_nb_allocs; /* total number of satisfied allocations */
- uint64_t tot_nb_frees; /* total number of satisfied free operations */
- int64_t cur_nb_allocs; /* current number of active allocations */
- int64_t max_nb_allocs; /* maximum number of active allocations */
+ __u64 tot_nb_allocs; /* total number of satisfied allocations */
+ __u64 tot_nb_frees; /* total number of satisfied free operations */
+ __s64 cur_nb_allocs; /* current number of active allocations */
+ __s64 max_nb_allocs; /* maximum number of active allocations */
- ssize_t cur_mem_use; /* current used memory by allocations */
- ssize_t max_mem_use; /* maximum amount of memory used by allocations */
+ __ssz cur_mem_use; /* current used memory by allocations */
+ __ssz max_mem_use; /* maximum amount of memory used by allocations */
- uint64_t nb_enomem; /* number of times failing allocation requests */
+ __u64 nb_enomem; /* number of times failing allocation requests */
};
#endif /* CONFIG_LIBUKALLOC_IFSTATS */
@@ -120,7 +118,7 @@ struct uk_alloc {
/* internal */
struct uk_alloc *next;
- int8_t priv[];
+ __u8 priv[];
};
extern struct uk_alloc *_uk_alloc_head;
@@ -128,7 +126,7 @@ extern struct uk_alloc *_uk_alloc_head;
/* Iterate over all registered allocators */
#define uk_alloc_foreach(iter) \
for (iter = _uk_alloc_head; \
- iter != NULL; \
+ iter != __NULL; \
iter = iter->next)
#if CONFIG_LIBUKALLOC_IFSTATS_PERLIB
@@ -141,34 +139,34 @@ static inline struct uk_alloc *uk_alloc_get_default(void)
#endif /* !CONFIG_LIBUKALLOC_IFSTATS_PERLIB */
/* wrapper functions */
-static inline void *uk_do_malloc(struct uk_alloc *a, size_t size)
+static inline void *uk_do_malloc(struct uk_alloc *a, __sz size)
{
UK_ASSERT(a);
return a->malloc(a, size);
}
-static inline void *uk_malloc(struct uk_alloc *a, size_t size)
+static inline void *uk_malloc(struct uk_alloc *a, __sz size)
{
if (unlikely(!a)) {
errno = ENOMEM;
- return NULL;
+ return __NULL;
}
return uk_do_malloc(a, size);
}
static inline void *uk_do_calloc(struct uk_alloc *a,
- size_t nmemb, size_t size)
+ __sz nmemb, __sz size)
{
UK_ASSERT(a);
return a->calloc(a, nmemb, size);
}
static inline void *uk_calloc(struct uk_alloc *a,
- size_t nmemb, size_t size)
+ __sz nmemb, __sz size)
{
if (unlikely(!a)) {
errno = ENOMEM;
- return NULL;
+ return __NULL;
}
return uk_do_calloc(a, nmemb, size);
}
@@ -177,50 +175,50 @@ static inline void *uk_calloc(struct uk_alloc *a,
#define uk_zalloc(a, size) uk_calloc((a), 1, (size))
static inline void *uk_do_realloc(struct uk_alloc *a,
- void *ptr, size_t size)
+ void *ptr, __sz size)
{
UK_ASSERT(a);
return a->realloc(a, ptr, size);
}
-static inline void *uk_realloc(struct uk_alloc *a, void *ptr, size_t size)
+static inline void *uk_realloc(struct uk_alloc *a, void *ptr, __sz size)
{
if (unlikely(!a)) {
errno = ENOMEM;
- return NULL;
+ return __NULL;
}
return uk_do_realloc(a, ptr, size);
}
static inline int uk_do_posix_memalign(struct uk_alloc *a, void **memptr,
- size_t align, size_t size)
+ __sz align, __sz size)
{
UK_ASSERT(a);
return a->posix_memalign(a, memptr, align, size);
}
static inline int uk_posix_memalign(struct uk_alloc *a, void **memptr,
- size_t align, size_t size)
+ __sz align, __sz size)
{
if (unlikely(!a)) {
- *memptr = NULL;
+ *memptr = __NULL;
return ENOMEM;
}
return uk_do_posix_memalign(a, memptr, align, size);
}
static inline void *uk_do_memalign(struct uk_alloc *a,
- size_t align, size_t size)
+ __sz align, __sz size)
{
UK_ASSERT(a);
return a->memalign(a, align, size);
}
static inline void *uk_memalign(struct uk_alloc *a,
- size_t align, size_t size)
+ __sz align, __sz size)
{
if (unlikely(!a))
- return NULL;
+ return __NULL;
return uk_do_memalign(a, align, size);
}
@@ -244,7 +242,7 @@ static inline void *uk_do_palloc(struct uk_alloc *a, unsigned long num_pages)
static inline void *uk_palloc(struct uk_alloc *a, unsigned long num_pages)
{
if (unlikely(!a || !a->palloc))
- return NULL;
+ return __NULL;
return uk_do_palloc(a, num_pages);
}
@@ -262,7 +260,7 @@ static inline void uk_pfree(struct uk_alloc *a, void *ptr,
}
static inline int uk_alloc_addmem(struct uk_alloc *a, void *base,
- size_t size)
+ __sz size)
{
UK_ASSERT(a);
if (a->addmem)
@@ -272,11 +270,11 @@ static inline int uk_alloc_addmem(struct uk_alloc *a, void *base,
}
/* current biggest allocation request possible */
-static inline ssize_t uk_alloc_maxalloc(struct uk_alloc *a)
+static inline __ssz uk_alloc_maxalloc(struct uk_alloc *a)
{
UK_ASSERT(a);
if (!a->maxalloc)
- return (ssize_t) -ENOTSUP;
+ return (__ssz) -ENOTSUP;
return a->maxalloc(a);
}
@@ -289,11 +287,11 @@ static inline long uk_alloc_pmaxalloc(struct uk_alloc *a)
}
/* total free memory of the allocator */
-static inline ssize_t uk_alloc_availmem(struct uk_alloc *a)
+static inline __ssz uk_alloc_availmem(struct uk_alloc *a)
{
UK_ASSERT(a);
if (!a->availmem)
- return (ssize_t) -ENOTSUP;
+ return (__ssz) -ENOTSUP;
return a->availmem(a);
}
@@ -305,7 +303,7 @@ static inline long uk_alloc_pavail(struct uk_alloc *a)
return a->pavail(a);
}
-size_t uk_alloc_availmem_total(void);
+__sz uk_alloc_availmem_total(void);
unsigned long uk_alloc_pavail_total(void);
diff --git a/lib/ukalloc/include/uk/alloc_impl.h b/lib/ukalloc/include/uk/alloc_impl.h
index 961e36b5..1db4b5fe 100644
--- a/lib/ukalloc/include/uk/alloc_impl.h
+++ b/lib/ukalloc/include/uk/alloc_impl.h
@@ -57,26 +57,26 @@ int uk_alloc_register(struct uk_alloc *a);
/* Functions that can be used by allocators that implement palloc(),
* pfree() and potentially pavail(), pmaxalloc() only
*/
-void *uk_malloc_ifpages(struct uk_alloc *a, size_t size);
-void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size);
+void *uk_malloc_ifpages(struct uk_alloc *a, __sz size);
+void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, __sz size);
int uk_posix_memalign_ifpages(struct uk_alloc *a, void **memptr,
- size_t align, size_t size);
+ __sz align, __sz size);
void uk_free_ifpages(struct uk_alloc *a, void *ptr);
-ssize_t uk_alloc_availmem_ifpages(struct uk_alloc *a);
-ssize_t uk_alloc_maxalloc_ifpages(struct uk_alloc *a);
+__ssz uk_alloc_availmem_ifpages(struct uk_alloc *a);
+__ssz uk_alloc_maxalloc_ifpages(struct uk_alloc *a);
#if CONFIG_LIBUKALLOC_IFMALLOC
-void *uk_malloc_ifmalloc(struct uk_alloc *a, size_t size);
-void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size);
+void *uk_malloc_ifmalloc(struct uk_alloc *a, __sz size);
+void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, __sz size);
int uk_posix_memalign_ifmalloc(struct uk_alloc *a, void **memptr,
- size_t align, size_t size);
+ __sz align, __sz size);
void uk_free_ifmalloc(struct uk_alloc *a, void *ptr);
#endif
/* Functionality that is provided based on malloc() and posix_memalign() */
-void *uk_calloc_compat(struct uk_alloc *a, size_t num, size_t len);
-void *uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size);
-void *uk_memalign_compat(struct uk_alloc *a, size_t align, size_t len);
+void *uk_calloc_compat(struct uk_alloc *a, __sz num, __sz len);
+void *uk_realloc_compat(struct uk_alloc *a, void *ptr, __sz size);
+void *uk_memalign_compat(struct uk_alloc *a, __sz align, __sz len);
void *uk_palloc_compat(struct uk_alloc *a, unsigned long num_pages);
void uk_pfree_compat(struct uk_alloc *a, void *ptr, unsigned long num_pages);
long uk_alloc_pavail_compat(struct uk_alloc *a);
@@ -110,7 +110,7 @@ static inline void __uk_alloc_stats_refresh_minmax(struct uk_alloc_stats *stats)
}
static inline void _uk_alloc_stats_count_alloc(struct uk_alloc_stats *stats,
- void *ptr, size_t size)
+ void *ptr, __sz size)
{
/* TODO: SMP safety */
uk_preempt_disable();
@@ -128,7 +128,7 @@ static inline void _uk_alloc_stats_count_alloc(struct uk_alloc_stats *stats,
}
static inline void _uk_alloc_stats_count_free(struct uk_alloc_stats *stats,
- void *ptr, size_t size)
+ void *ptr, __sz size)
{
uk_preempt_disable();
if (likely(ptr)) {
diff --git a/lib/ukalloc/libstats.c b/lib/ukalloc/libstats.c
index 0424e8f5..d49d9dcc 100644
--- a/lib/ukalloc/libstats.c
+++ b/lib/ukalloc/libstats.c
@@ -53,10 +53,10 @@ static inline struct uk_alloc *_uk_alloc_get_actual_default(void)
}
#define WATCH_STATS_START(p) \
- ssize_t _before_mem_use; \
- size_t _before_nb_allocs; \
- size_t _before_tot_nb_allocs; \
- size_t _before_nb_enomem; \
+ __ssz _before_mem_use; \
+ __sz _before_nb_allocs; \
+ __sz _before_tot_nb_allocs; \
+ __sz _before_nb_enomem; \
\
uk_preempt_disable(); \
_before_mem_use = (p)->_stats.cur_mem_use; \
@@ -66,8 +66,8 @@ static inline struct uk_alloc *_uk_alloc_get_actual_default(void)
#define WATCH_STATS_END(p, nb_allocs_diff, nb_enomem_diff, \
mem_use_diff, alloc_size) \
- size_t _nb_allocs = (p)->_stats.tot_nb_allocs \
- - _before_tot_nb_allocs; \
+ __sz _nb_allocs = (p)->_stats.tot_nb_allocs \
+ - _before_tot_nb_allocs; \
\
/* NOTE: We assume that an allocator call does at
* most one allocation. Otherwise we cannot currently
@@ -77,9 +77,9 @@ static inline struct uk_alloc *_uk_alloc_get_actual_default(void)
\
*(mem_use_diff) = (p)->_stats.cur_mem_use \
- _before_mem_use; \
- *(nb_allocs_diff) = (ssize_t) (p)->_stats.cur_nb_allocs \
+ *(nb_allocs_diff) = (__ssz) (p)->_stats.cur_nb_allocs \
- _before_nb_allocs; \
- *(nb_enomem_diff) = (ssize_t) (p)->_stats.nb_enomem \
+ *(nb_enomem_diff) = (__ssz) (p)->_stats.nb_enomem \
- _before_nb_enomem; \
if (_nb_allocs > 0) \
*(alloc_size) = (p)->_stats.last_alloc_size; \
@@ -88,10 +88,10 @@ static inline struct uk_alloc *_uk_alloc_get_actual_default(void)
uk_preempt_enable();
static inline void update_stats(struct uk_alloc_stats *stats,
- ssize_t nb_allocs_diff,
- ssize_t nb_enomem_diff,
- ssize_t mem_use_diff,
- size_t last_alloc_size)
+ __ssz nb_allocs_diff,
+ __ssz nb_enomem_diff,
+ __ssz mem_use_diff,
+ __sz last_alloc_size)
{
uk_preempt_disable();
if (nb_allocs_diff >= 0)
@@ -107,11 +107,11 @@ static inline void update_stats(struct uk_alloc_stats *stats,
uk_preempt_enable();
}
-static void *wrapper_malloc(struct uk_alloc *a, size_t size)
+static void *wrapper_malloc(struct uk_alloc *a, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
void *ret;
UK_ASSERT(p);
@@ -125,11 +125,11 @@ static void *wrapper_malloc(struct uk_alloc *a, size_t size)
return ret;
}
-static void *wrapper_calloc(struct uk_alloc *a, size_t nmemb, size_t size)
+static void *wrapper_calloc(struct uk_alloc *a, __sz nmemb, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
void *ret;
UK_ASSERT(p);
@@ -138,16 +138,17 @@ static void *wrapper_calloc(struct uk_alloc *a, size_t nmemb, size_t size)
ret = uk_do_calloc(p, nmemb, size);
WATCH_STATS_END(p, &nb_allocs, &nb_enomem, &mem_use, &alloc_size);
- update_stats(&a->_stats, nb_allocs, nb_enomem, mem_use, alloc_size);
Shouldn't we make this check for `ret` in the other wrappers as well?
+ update_stats(&a->_stats, nb_allocs, nb_enomem, mem_use,
+ ret != NULL ? alloc_size : 0);
return ret;
}
static int wrapper_posix_memalign(struct uk_alloc *a, void **memptr,
- size_t align, size_t size)
+ __sz align, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
int ret;
UK_ASSERT(p);
@@ -160,11 +161,11 @@ static int wrapper_posix_memalign(struct uk_alloc *a, void **memptr,
return ret;
}
-static void *wrapper_memalign(struct uk_alloc *a, size_t align, size_t size)
+static void *wrapper_memalign(struct uk_alloc *a, __sz align, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
void *ret;
UK_ASSERT(p);
@@ -177,11 +178,11 @@ static void *wrapper_memalign(struct uk_alloc *a, size_t align, size_t size)
return ret;
}
-static void *wrapper_realloc(struct uk_alloc *a, void *ptr, size_t size)
+static void *wrapper_realloc(struct uk_alloc *a, void *ptr, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
void *ret;
UK_ASSERT(p);
@@ -197,8 +198,8 @@ static void *wrapper_realloc(struct uk_alloc *a, void *ptr, size_t size)
static void wrapper_free(struct uk_alloc *a, void *ptr)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
UK_ASSERT(p);
@@ -212,8 +213,8 @@ static void wrapper_free(struct uk_alloc *a, void *ptr)
static void *wrapper_palloc(struct uk_alloc *a, unsigned long num_pages)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
void *ret;
UK_ASSERT(p);
@@ -230,8 +231,8 @@ static void wrapper_pfree(struct uk_alloc *a, void *ptr,
unsigned long num_pages)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
UK_ASSERT(p);
@@ -245,7 +246,7 @@ static void wrapper_pfree(struct uk_alloc *a, void *ptr,
/* The following interfaces do not change allocation statistics,
* this is why we just forward the calls
*/
-static int wrapper_addmem(struct uk_alloc *a __unused, void *base, size_t size)
+static int wrapper_addmem(struct uk_alloc *a __unused, void *base, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
@@ -253,7 +254,7 @@ static int wrapper_addmem(struct uk_alloc *a __unused, void *base, size_t size)
return uk_alloc_addmem(p, base, size);
}
-static size_t wrapper_maxalloc(struct uk_alloc *a __unused)
+static __ssz wrapper_maxalloc(struct uk_alloc *a __unused)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
@@ -261,7 +262,7 @@ static size_t wrapper_maxalloc(struct uk_alloc *a __unused)
return uk_alloc_maxalloc(p);
}
-static size_t wrapper_availmem(struct uk_alloc *a __unused)
+static __ssz wrapper_availmem(struct uk_alloc *a __unused)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
diff --git a/lib/ukallocregion/region.c b/lib/ukallocregion/region.c
index 963d03c8..2c5098b7 100644
--- a/lib/ukallocregion/region.c
+++ b/lib/ukallocregion/region.c
@@ -47,6 +47,9 @@
* an introduction to region-based memory management.
*/
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/types.h>
#include <uk/allocregion.h>
#include <uk/alloc_impl.h>
#include <uk/page.h> /* round_pgup() */
diff --git a/plat/common/include/pci/pci_bus.h b/plat/common/include/pci/pci_bus.h
index db406a1b..d3235528 100644
--- a/plat/common/include/pci/pci_bus.h
+++ b/plat/common/include/pci/pci_bus.h
@@ -66,6 +66,8 @@
#ifndef __UKPLAT_COMMON_PCI_BUS_H__
#define __UKPLAT_COMMON_PCI_BUS_H__
+#include <stdint.h>
+#include <stddef.h>
#include <uk/bus.h>
#include <uk/alloc.h>
#include <uk/ctors.h>
diff --git a/plat/common/memory.c b/plat/common/memory.c
index c46a4b0e..de4a38a7 100644
--- a/plat/common/memory.c
+++ b/plat/common/memory.c
@@ -34,6 +34,8 @@
#include <uk/plat/memory.h>
#include <uk/plat/common/memory.h>
+#include <uk/alloc.h>
+#include <stddef.h>
static struct uk_alloc *plat_allocator;
diff --git a/plat/xen/gnttab.c b/plat/xen/gnttab.c
index 4f28df99..fd056572 100644
--- a/plat/xen/gnttab.c
+++ b/plat/xen/gnttab.c
@@ -17,6 +17,7 @@
****************************************************************************
*/
#include <stdint.h>
+#include <stddef.h>
#ifdef DBGGNT
#include <string.h>
#endif
diff --git a/plat/xen/memory.c b/plat/xen/memory.c
index 8e7a7dda..365bbaeb 100644
--- a/plat/xen/memory.c
+++ b/plat/xen/memory.c
@@ -34,6 +34,8 @@
*/
#include <string.h>
+#include <stdint.h>
+#include <stddef.h>
#include <uk/plat/common/sections.h>
#include <common/gnttab.h>
diff --git a/plat/xen/x86/gnttab.c b/plat/xen/x86/gnttab.c
index b09d52bf..7a647481 100644
--- a/plat/xen/x86/gnttab.c
+++ b/plat/xen/x86/gnttab.c
@@ -24,6 +24,7 @@
/* Taken from Mini-OS */
#include <stdint.h>
+#include <stddef.h>
#include <uk/print.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
--
2.20.1
|