|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [UNIKRAFT PATCH, v2, 11/15] lib/ukring: Properly comment {ring.c, ring.h}
This commit updates the SPDX header, adds additional authorship,
and properly comments all functions and inlines.
Signed-off-by: Alexander Jung <alexander.jung@xxxxxxxxx>
---
lib/ukring/include/uk/ring.h | 257 +++++++++++++++++++++++------------
lib/ukring/ring.c | 19 ++-
2 files changed, 184 insertions(+), 92 deletions(-)
diff --git a/lib/ukring/include/uk/ring.h b/lib/ukring/include/uk/ring.h
index b51a11a..49f9dfe 100644
--- a/lib/ukring/include/uk/ring.h
+++ b/lib/ukring/include/uk/ring.h
@@ -1,12 +1,16 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+/* SPDX-License-Identifier: BSD-2-Clause-FreeBSD */
+/*
+ * Authors: Kip Macy <kmacy@xxxxxxxxxxx>
+ * Alexander Jung <alexander.jung@xxxxxxxxx>
*
- * Copyright (c) 2007-2009 Kip Macy <kmacy@xxxxxxxxxxx>
- * All rights reserved.
+ * Copyright (c) 2007-2009, Kip Macy <kmacy@xxxxxxxxxxx>
+ * 2020, NEC Laboratories Europe GmbH, NEC Corporation.
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
+ *
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
@@ -26,7 +30,11 @@
* SUCH DAMAGE.
*
* $FreeBSD$
+ */
+/*
+ * Simple ring implementation to handle object references.
*
+ * Inspired by FreeBSD and modified (commit-id: c45cce1).
*/
#ifndef __UK_RING_H__
@@ -40,6 +48,7 @@
#include <uk/plat/lcpu.h>
#include <uk/arch/atomic.h>
+
struct uk_ring {
volatile uint32_t prod_head;
volatile uint32_t prod_tail;
@@ -52,14 +61,18 @@ struct uk_ring {
int cons_mask;
#ifdef CONFIG_LIBUKDEBUG
struct uk_mutex *lock;
-#endif
+#endif /* CONFIG_LIBUKDEBUG */
void *ring[0];
};
-/*
- * multi-producer safe lock-free ring buffer enqueue
+/**
+ * Multi-producer safe lock-free ring buffer enqueue.
*
+ * @param r
+ *   Reference to the ring structure.
+ * @param buf
+ *   The buffer (object reference) to enqueue into the ring.
*/
static __inline int
uk_ring_enqueue(struct uk_ring *r, void *buf)
@@ -70,16 +83,15 @@ uk_ring_enqueue(struct uk_ring *r, void *buf)
int i;
/*
- * Note: It is possible to encounter an mbuf that was removed
- * via drpeek(), and then re-added via drputback() and
- * trigger spurious critical messages.
+ * Note: It is possible to encounter an mbuf that was removed via drpeek(),
+ * and then re-added via drputback() and trigger a spurious panic.
*/
for (i = r->cons_head; i != r->prod_head;
i = ((i + 1) & r->cons_mask))
if (r->ring[i] == buf)
uk_pr_crit("buf=%p already enqueue at %d prod=%d cons=%d\n",
buf, i, r->prod_tail, r->cons_tail);
-#endif
+#endif /* CONFIG_LIBUKDEBUG_PRINTK_CRIT */
ukplat_lcpu_disable_irq();
@@ -102,20 +114,19 @@ uk_ring_enqueue(struct uk_ring *r, void *buf)
#ifdef CONFIG_LIBUKDEBUG_PRINTK_CRIT
if (r->ring[prod_head] != NULL)
uk_pr_crit("dangling value in enqueue\n");
-#endif
+#endif /* CONFIG_LIBUKDEBUG_PRINTK_CRIT */
r->ring[prod_head] = buf;
/*
- * If there are other enqueues in progress
- * that preceded us, we need to wait for them
- * to complete
+ * If there are other enqueues in progress that preceded us, we need to wait
+ * for them to complete.
*/
/* TODO: Provide cpu_spinwait() */
#if 0
while (r->prod_tail != prod_head)
cpu_spinwait();
-#endif
+#endif /* 0 */
ukarch_store_n(&r->prod_tail, prod_next);
ukplat_lcpu_enable_irq();
@@ -123,9 +134,12 @@ uk_ring_enqueue(struct uk_ring *r, void *buf)
return 0;
}
-/*
- * multi-consumer safe dequeue
+
+/**
+ * Multi-consumer safe dequeue.
*
+ * @param r
+ *   Reference to the ring structure.
*/
static __inline void *
uk_ring_dequeue(struct uk_ring *r)
@@ -149,18 +163,17 @@ uk_ring_dequeue(struct uk_ring *r)
#ifdef CONFIG_LIBUKDEBUG
r->ring[cons_head] = NULL;
-#endif
+#endif /* CONFIG_LIBUKDEBUG */
/*
- * If there are other dequeues in progress
- * that preceded us, we need to wait for them
- * to complete
+ * If there are other dequeues in progress that preceded us, we need to wait
+ * for them to complete.
*/
/* TODO: Provide cpu_spinwait() */
#if 0
while (r->cons_tail != cons_head)
cpu_spinwait();
-#endif
+#endif /* 0 */
ukarch_store_n(&r->cons_tail, cons_next);
ukplat_lcpu_enable_irq();
@@ -168,10 +181,13 @@ uk_ring_dequeue(struct uk_ring *r)
return buf;
}
-/*
- * single-consumer dequeue
- * use where dequeue is protected by a lock
- * e.g. a network driver's tx queue lock
+
+/**
+ * Single-consumer dequeue; use where dequeue is protected by a lock, e.g. a
+ * network driver's tx queue lock.
+ *
+ * @param r
+ *   Reference to the ring structure.
*/
static __inline void *
uk_ring_dequeue_single(struct uk_ring *r)
@@ -180,41 +196,49 @@ uk_ring_dequeue_single(struct uk_ring *r)
/* TODO: Support CPU prefetchhing. */
#if 0
uint32_t cons_next_next;
-#endif
+#endif /* 0 */
uint32_t prod_tail;
void *buf;
/*
* This is a workaround to allow using uk_ring on ARM and ARM64.
+ *
* ARM64TODO: Fix uk_ring in a generic way.
+ *
* REMARKS: It is suspected that cons_head does not require
* load_acq operation, but this change was extensively tested
* and confirmed it's working. To be reviewed once again in
* FreeBSD-12.
*
* Preventing following situation:
- * Core(0) - uk_ring_enqueue() Core(1)
- uk_ring_dequeue_single()
- * -----------------------------------------
----------------------------------------------
*
- *
cons_head = r->cons_head;
- * atomic_cmpset_acq_32(&r->prod_head, ...));
- *
buf = r->ring[cons_head]; <see <1>>
- * r->ring[prod_head] = buf;
- * atomic_store_rel_32(&r->prod_tail, ...);
- *
prod_tail = r->prod_tail;
- *
if (cons_head == prod_tail)
- *
return (NULL);
- *
<condition is false and code uses invalid(old) buf>`
+ * | uk_ring_enqueue()                         | uk_ring_dequeue_single()    |
+ * |-------------------------------------------+-----------------------------|
+ * |                                           |                             |
+ * |                                           | cons_head = r->cons_head;   |
+ * | atomic_cmpset_acq_32(&r->prod_head, ...); |                             |
+ * |                                           | buf = r->ring[cons_head];<1>|
+ * | r->ring[prod_head] = buf;                 |                             |
+ * | atomic_store_rel_32(&r->prod_tail, ...);  |                             |
+ * |                                           | prod_tail = r->prod_tail;   |
+ * |                                           |                             |
+ * |                                           | if (cons_head == prod_tail) |
+ * |                                           |   return NULL;              |
+ * |                                           |                             |
+ * |                                           | <condition is false and     |
+ * |                                           |  code uses invalid(old)     |
+ * |                                           |  buf>                       |
*
- * <1> Load (on core 1) from r->ring[cons_head] can be reordered
(speculative readed) by CPU.
- */
+ * <1> Load (on core 1) from r->ring[cons_head] can be reordered (speculatively
+ * read) by the CPU.
+ */
#if defined(CONFIG_ARCH_ARM_32) || defined(CONFIG_ARCH_ARM_64)
/* TODO: Provide atomic_load_acq_32() */
/* cons_head = atomic_load_acq_32(&r->cons_head); */
cons_head = &r->cons_head;
#else
cons_head = r->cons_head;
-#endif
+#endif /* defined(CONFIG_ARCH_ARM_32) || defined(CONFIG_ARCH_ARM_64) */
/* TODO: Provide atomic_load_acq_32() */
/* prod_tail = atomic_load_acq_32(&r->prod_tail); */
prod_tail = &r->prod_tail;
@@ -223,7 +247,7 @@ uk_ring_dequeue_single(struct uk_ring *r)
/* TODO: Support CPU prefetchhing. */
#if 0
cons_next_next = (cons_head + 2) & r->cons_mask;
-#endif
+#endif /* 0 */
if (cons_head == prod_tail)
return NULL;
@@ -235,7 +259,7 @@ uk_ring_dequeue_single(struct uk_ring *r)
if (cons_next_next != prod_tail)
prefetch(r->ring[cons_next_next]);
}
-#endif
+#endif /* 0 */
r->cons_head = cons_next;
buf = r->ring[cons_head];
@@ -249,16 +273,19 @@ uk_ring_dequeue_single(struct uk_ring *r)
if (r->cons_tail != cons_head)
uk_pr_crit("inconsistent list cons_tail=%d cons_head=%d\n",
r->cons_tail, cons_head);
-#endif
+#endif /* CONFIG_LIBUKDEBUG_PRINTK_CRIT */
r->cons_tail = cons_next;
return buf;
}
-/*
- * single-consumer advance after a peek
- * use where it is protected by a lock
- * e.g. a network driver's tx queue lock
+
+/**
+ * Single-consumer advance after a peek; use where it is protected by a lock,
+ * e.g. a network driver's tx queue lock.
+ *
+ * @param r
+ *   Reference to the ring structure.
*/
static __inline void
uk_ring_advance_single(struct uk_ring *r)
@@ -277,26 +304,27 @@ uk_ring_advance_single(struct uk_ring *r)
#ifdef CONFIG_LIBUKDEBUG
r->ring[cons_head] = NULL;
-#endif
+#endif /* CONFIG_LIBUKDEBUG */
r->cons_tail = cons_next;
}
-/*
- * Used to return a buffer (most likely already there)
- * to the top of the ring. The caller should *not*
- * have used any dequeue to pull it out of the ring
- * but instead should have used the peek() function.
- * This is normally used where the transmit queue
- * of a driver is full, and an mbuf must be returned.
- * Most likely whats in the ring-buffer is what
- * is being put back (since it was not removed), but
- * sometimes the lower transmit function may have
- * done a pullup or other function that will have
- * changed it. As an optimization we always put it
- * back (since jhb says the store is probably cheaper),
- * if we have to do a multi-queue version we will need
- * the compare and an atomic.
+
+/**
+ * Used to return a buffer (most likely already there) to the top of the ring.
+ * The caller should *not* have used any dequeue to pull it out of the ring but
+ * instead should have used the peek() function. This is normally used where
+ * the transmit queue of a driver is full, and an mbuf must be returned. Most
+ * likely what's in the ring-buffer is what is being put back (since it was not
+ * removed), but sometimes the lower transmit function may have done a pullup
+ * or other function that will have changed it. As an optimization we always
+ * put it back (since jhb says the store is probably cheaper); if we have to do
+ * a multi-queue version we will need the compare and an atomic.
+ *
+ * @param r
+ *   Reference to the ring structure.
+ * @param new
+ *   The item to be pushed back into the ring.
*/
static __inline void
uk_ring_putback_single(struct uk_ring *r, void *new)
@@ -306,10 +334,13 @@ uk_ring_putback_single(struct uk_ring *r, void *new)
r->ring[r->cons_head] = new;
}
-/*
- * return a pointer to the first entry in the ring
- * without modifying it, or NULL if the ring is empty
- * race-prone if not protected by a lock
+
+/**
+ * Return a pointer to the first entry in the ring without modifying it, or
+ * NULL if the ring is empty. Race-prone if not protected by a lock.
+ *
+ * @param r
+ *   Reference to the ring structure.
*/
static __inline void *
uk_ring_peek(struct uk_ring *r)
@@ -317,13 +348,12 @@ uk_ring_peek(struct uk_ring *r)
#ifdef CONFIG_LIBUKDEBUG_PRINTK_CRIT
if ((r->lock != NULL) && !uk_mutex_is_locked(r->lock))
uk_pr_crit("lock not held on single consumer dequeue\n");
-#endif
+#endif /* CONFIG_LIBUKDEBUG_PRINTK_CRIT */
/*
- * I believe it is safe to not have a memory barrier
- * here because we control cons and tail is worst case
- * a lagging indicator so we worst case we might
- * return NULL immediately after a buffer has been enqueued
+ * It is safe to not have a memory barrier here because we control cons and
+ * tail is worst case a lagging indicator so we worst case we might return
+ * NULL immediately after a buffer has been enqueued.
*/
if (r->cons_head == r->prod_tail)
return NULL;
@@ -331,6 +361,14 @@ uk_ring_peek(struct uk_ring *r)
return r->ring[r->cons_head];
}
+
+/**
+ * Single-consumer: return and clear the first entry of the ring, or NULL if
+ * the ring is empty.
+ *
+ * @param r
+ *   Reference to the ring structure.
+ */
static __inline void *
uk_ring_peek_clear_single(struct uk_ring *r)
{
@@ -339,7 +377,7 @@ uk_ring_peek_clear_single(struct uk_ring *r)
if (!uk_mutex_is_locked(r->lock))
uk_pr_crit("lock not held on single consumer dequeue\n");
-#endif
+#endif /* CONFIG_LIBUKDEBUG_PRINTK_CRIT */
if (r->cons_head == r->prod_tail)
return NULL;
@@ -347,17 +385,15 @@ uk_ring_peek_clear_single(struct uk_ring *r)
#if defined(CONFIG_ARCH_ARM_32) || defined(CONFIG_ARCH_ARM_64)
/*
* The barrier is required there on ARM and ARM64 to ensure, that
- * r->ring[r->cons_head] will not be fetched before the above
- * condition is checked.
- * Without the barrier, it is possible, that buffer will be fetched
- * before the enqueue will put mbuf into r, then, in the meantime, the
- * enqueue will update the array and the prod_tail, and the
- * conditional check will be true, so we will return previously fetched
- * (and invalid) buffer.
+ * r->ring[r->cons_head] will not be fetched before the above condition is
+ * checked. Without the barrier, it is possible that the buffer will be fetched
+ * before the enqueue puts the mbuf into r; then, in the meantime, the
+ * enqueue will update the array and the prod_tail, and the conditional check
+ * will be true, so we will return a previously fetched (and invalid) buffer.
*/
/* TODO: Provide atomic_thread_fence_acq(); */
/* atomic_thread_fence_acq(); */
-#endif
+#endif /* defined(CONFIG_ARCH_ARM_32) || defined(CONFIG_ARCH_ARM_64) */
#ifdef CONFIG_LIBUKDEBUG
/*
@@ -370,21 +406,42 @@ uk_ring_peek_clear_single(struct uk_ring *r)
return ret;
#else
return r->ring[r->cons_head];
-#endif
+#endif /* CONFIG_LIBUKDEBUG */
}
+
+/**
+ * Return whether the ring buffer is full.
+ *
+ * @param r
+ *   Reference to the ring structure.
+ */
static __inline int
uk_ring_full(struct uk_ring *r)
{
return ((r->prod_head + 1) & r->prod_mask) == r->cons_tail;
}
+
+/**
+ * Return whether the ring buffer is empty.
+ *
+ * @param r
+ *   Reference to the ring structure.
+ */
static __inline int
uk_ring_empty(struct uk_ring *r)
{
return r->cons_head == r->prod_tail;
}
+
+/**
+ * Return the number of items currently in the ring buffer.
+ *
+ * @param r
+ *   Reference to the ring structure.
+ */
static __inline int
uk_ring_count(struct uk_ring *r)
{
@@ -392,8 +449,32 @@ uk_ring_count(struct uk_ring *r)
& r->prod_mask;
}
-struct uk_ring *uk_ring_alloc(struct uk_alloc *a, int count, int flags,
- struct uk_mutex *);
-void uk_ring_free(struct uk_alloc *a, struct uk_ring *r);
-#endif
\ No newline at end of file
+/**
+ * Create a new ring buffer.
+ *
+ * @param a
+ *   The memory allocator to use when creating the ring buffer.
+ * @param count
+ *   The size of the ring buffer.
+ * @param flags
+ *   Additional flags to specify to the ring.
+ * @param lock
+ *   The mutex to use when debugging the ring buffer.
+ */
+struct uk_ring *
+uk_ring_alloc(struct uk_alloc *a, int count, int flags, struct uk_mutex *lock);
+
+
+/**
+ * Free the ring from use.
+ *
+ * @param a
+ *   The memory allocator to use when freeing the object.
+ * @param r
+ *   Reference to the ring structure.
+ */
+void
+uk_ring_free(struct uk_alloc *a, struct uk_ring *r);
+
+#endif /* __UK_RING_H__ */
diff --git a/lib/ukring/ring.c b/lib/ukring/ring.c
index db5600f..0523648 100644
--- a/lib/ukring/ring.c
+++ b/lib/ukring/ring.c
@@ -1,12 +1,16 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+/* SPDX-License-Identifier: BSD-2-Clause-FreeBSD */
+/*
+ * Authors: Kip Macy <kmacy@xxxxxxxxxxx>
+ * Alexander Jung <alexander.jung@xxxxxxxxx>
*
- * Copyright (c) 2007, 2008 Kip Macy <kmacy@xxxxxxxxxxx>
- * All rights reserved.
+ * Copyright (c) 2007-2009, Kip Macy <kmacy@xxxxxxxxxxx>
+ * 2020, NEC Laboratories Europe GmbH, NEC Corporation.
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
+ *
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
@@ -24,6 +28,13 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Simple ring implementation to handle object references.
+ *
+ * Inspired by FreeBSD and modified (commit-id: c45cce1).
*/
#include <sys/param.h>
--
2.20.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |