[UNIKRAFT PATCH, v2, 05/15] lib/ukring: Remove br_ prefix and rename br variable
This commit removes the `br_` prefix from the fields of the `uk_ring`
struct and renames all instances of the `br` variable to simply `r`.
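
To illustrate the scope of the change, a hypothetical before/after
fragment (modelled on the uk_ring_empty() check in the diff below; not
part of this patch):

    /* before this patch */
    if (br->br_cons_head == br->br_prod_tail)
            return NULL;

    /* after this patch */
    if (r->cons_head == r->prod_tail)
            return NULL;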
Signed-off-by: Alexander Jung <alexander.jung@xxxxxxxxx>
---
lib/ukring/include/uk/ring.h | 178 +++++++++++++++++------------------
lib/ukring/ring.c | 18 ++--
2 files changed, 98 insertions(+), 98 deletions(-)
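
For orientation while reading the renamed hot path: uk_ring_enqueue()
below is a classic multi-producer scheme -- reserve a slot by
compare-and-swapping prod_head, write the buffer, then publish prod_tail
in FIFO order. A condensed, self-contained sketch of that scheme, using
GCC __atomic builtins as stand-ins for ukarch_compare_exchange_sync()
and ukarch_store_n() (an assumed equivalence; the real code also
disables IRQs and counts drops, omitted here):

    #include <errno.h>
    #include <stdint.h>

    struct xring {                        /* illustrative, not uk_ring */
            volatile uint32_t prod_head;  /* next slot to reserve */
            volatile uint32_t prod_tail;  /* last published slot */
            uint32_t prod_mask;           /* size - 1, size a power of two */
            volatile uint32_t cons_tail;
            void *ring[];
    };

    static int xring_enqueue(struct xring *r, void *buf)
    {
            uint32_t head, next;

            do {
                    head = r->prod_head;
                    next = (head + 1) & r->prod_mask;
                    if (next == r->cons_tail)
                            return ENOBUFS;  /* ring full */
            } while (!__atomic_compare_exchange_n(&r->prod_head, &head,
                                                  next, 0,
                                                  __ATOMIC_SEQ_CST,
                                                  __ATOMIC_SEQ_CST));

            r->ring[head] = buf;  /* slot is exclusively ours now */

            /* Producers publish in order: wait for earlier enqueues. */
            while (r->prod_tail != head)
                    ;
            __atomic_store_n(&r->prod_tail, next, __ATOMIC_RELEASE);
            return 0;
    }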
diff --git a/lib/ukring/include/uk/ring.h b/lib/ukring/include/uk/ring.h
index e5f5b68..26ac2f8 100644
--- a/lib/ukring/include/uk/ring.h
+++ b/lib/ukring/include/uk/ring.h
@@ -41,19 +41,19 @@
#include <uk/arch/atomic.h>
struct uk_ring {
- volatile uint32_t br_prod_head;
- volatile uint32_t br_prod_tail;
- int br_prod_size;
- int br_prod_mask;
- uint64_t br_drops;
- volatile uint32_t br_cons_head __aligned(CACHE_LINE_SIZE);
- volatile uint32_t br_cons_tail;
- int br_cons_size;
- int br_cons_mask;
+ volatile uint32_t prod_head;
+ volatile uint32_t prod_tail;
+ int prod_size;
+ int prod_mask;
+ uint64_t drops;
+ volatile uint32_t cons_head __aligned(CACHE_LINE_SIZE);
+ volatile uint32_t cons_tail;
+ int cons_size;
+ int cons_mask;
#ifdef DEBUG_BUFRING
- struct uk_mutex *br_lock;
+ struct uk_mutex *lock;
#endif
- void *br_ring[0] __aligned(CACHE_LINE_SIZE);
+ void *ring[0] __aligned(CACHE_LINE_SIZE);
};
@@ -62,7 +62,7 @@ struct uk_ring {
*
*/
static __inline int
-uk_ring_enqueue(struct uk_ring *br, void *buf)
+uk_ring_enqueue(struct uk_ring *r, void *buf)
{
uint32_t prod_head, prod_next, cons_tail;
@@ -71,50 +71,50 @@ uk_ring_enqueue(struct uk_ring *br, void *buf)
/*
* Note: It is possible to encounter an mbuf that was removed
- * via drbr_peek(), and then re-added via drbr_putback() and
+ * via drpeek(), and then re-added via drputback() and
* trigger spurious critical messages.
*/
- for (i = br->br_cons_head; i != br->br_prod_head;
- i = ((i + 1) & br->br_cons_mask))
- if (br->br_ring[i] == buf)
+ for (i = r->cons_head; i != r->prod_head;
+ i = ((i + 1) & r->cons_mask))
+ if (r->ring[i] == buf)
uk_pr_crit("buf=%p already enqueue at %d prod=%d cons=%d\n",
- buf, i, br->br_prod_tail, br->br_cons_tail);
+ buf, i, r->prod_tail, r->cons_tail);
#endif
ukplat_lcpu_disable_irq();
do {
- prod_head = br->br_prod_head;
- prod_next = (prod_head + 1) & br->br_prod_mask;
- cons_tail = br->br_cons_tail;
+ prod_head = r->prod_head;
+ prod_next = (prod_head + 1) & r->prod_mask;
+ cons_tail = r->cons_tail;
if (prod_next == cons_tail) {
rmb();
- if (prod_head == br->br_prod_head && cons_tail == br->br_cons_tail) {
- br->br_drops++;
+ if (prod_head == r->prod_head && cons_tail == r->cons_tail) {
+ r->drops++;
ukplat_lcpu_enable_irq();
return ENOBUFS;
}
continue;
}
- } while (!ukarch_compare_exchange_sync(&br->br_prod_head, prod_head, prod_next));
+ } while (!ukarch_compare_exchange_sync(&r->prod_head, prod_head, prod_next));
#ifdef DEBUG_BUFRING
- if (br->br_ring[prod_head] != NULL)
+ if (r->ring[prod_head] != NULL)
uk_pr_crit("dangling value in enqueue\n");
#endif
- br->br_ring[prod_head] = buf;
+ r->ring[prod_head] = buf;
/*
* If there are other enqueues in progress
* that preceded us, we need to wait for them
* to complete
*/
- while (br->br_prod_tail != prod_head)
+ while (r->prod_tail != prod_head)
cpu_spinwait();
- ukarch_store_n(&br->br_prod_tail, prod_next);
+ ukarch_store_n(&r->prod_tail, prod_next);
ukplat_lcpu_enable_irq();
return 0;
@@ -125,7 +125,7 @@ uk_ring_enqueue(struct uk_ring *br, void *buf)
*
*/
static __inline void *
-uk_ring_dequeue_mc(struct uk_ring *br)
+uk_ring_dequeue_mc(struct uk_ring *r)
{
uint32_t cons_head, cons_next;
void *buf;
@@ -133,19 +133,19 @@ uk_ring_dequeue_mc(struct uk_ring *br)
ukplat_lcpu_disable_irq();
do {
- cons_head = br->br_cons_head;
- cons_next = (cons_head + 1) & br->br_cons_mask;
+ cons_head = r->cons_head;
+ cons_next = (cons_head + 1) & r->cons_mask;
- if (cons_head == br->br_prod_tail) {
+ if (cons_head == r->prod_tail) {
ukplat_lcpu_enable_irq();
return NULL;
}
- } while (!ukarch_compare_exchange_sync(&br->br_cons_head, cons_head, cons_next));
+ } while (!ukarch_compare_exchange_sync(&r->cons_head, cons_head, cons_next));
- buf = br->br_ring[cons_head];
+ buf = r->ring[cons_head];
#ifdef DEBUG_BUFRING
- br->br_ring[cons_head] = NULL;
+ r->ring[cons_head] = NULL;
#endif
/*
@@ -153,10 +153,10 @@ uk_ring_dequeue_mc(struct uk_ring *br)
* that preceded us, we need to wait for them
* to complete
*/
- while (br->br_cons_tail != cons_head)
+ while (r->cons_tail != cons_head)
cpu_spinwait();
- ukarch_store_n(&br->br_cons_tail, cons_next);
+ ukarch_store_n(&r->cons_tail, cons_next);
ukplat_lcpu_enable_irq();
return buf;
@@ -168,7 +168,7 @@ uk_ring_dequeue_mc(struct uk_ring *br)
* e.g. a network driver's tx queue lock
*/
static __inline void *
-uk_ring_dequeue_sc(struct uk_ring *br)
+uk_ring_dequeue_sc(struct uk_ring *r)
{
uint32_t cons_head, cons_next;
#ifdef PREFETCH_DEFINED
@@ -180,7 +180,7 @@ uk_ring_dequeue_sc(struct uk_ring *br)
/*
* This is a workaround to allow using uk_ring on ARM and ARM64.
* ARM64TODO: Fix uk_ring in a generic way.
- * REMARKS: It is suspected that br_cons_head does not require
+ * REMARKS: It is suspected that cons_head does not require
* load_acq operation, but this change was extensively tested
* and confirmed it's working. To be reviewed once again in
* FreeBSD-12.
@@ -189,32 +189,32 @@ uk_ring_dequeue_sc(struct uk_ring *br)
* Core(0) - uk_ring_enqueue()                  Core(1) - uk_ring_dequeue_sc()
* -----------------------------------------    -----------------------------------------
*
- *                                             cons_head = br->br_cons_head;
- * atomic_cmpset_acq_32(&br->br_prod_head, ...));
- *                                             buf = br->br_ring[cons_head];     <see <1>>
- * br->br_ring[prod_head] = buf;
- * atomic_store_rel_32(&br->br_prod_tail, ...);
- *                                             prod_tail = br->br_prod_tail;
+ *                                             cons_head = r->cons_head;
+ * atomic_cmpset_acq_32(&r->prod_head, ...));
+ *                                             buf = r->ring[cons_head];         <see <1>>
+ * r->ring[prod_head] = buf;
+ * atomic_store_rel_32(&r->prod_tail, ...);
+ *                                             prod_tail = r->prod_tail;
*                                              if (cons_head == prod_tail)
*                                                      return (NULL);
*                                              <condition is false and code uses invalid (old) buf>
*
- * <1> Load (on core 1) from br->br_ring[cons_head] can be reordered (speculatively read) by CPU.
+ * <1> Load (on core 1) from r->ring[cons_head] can be reordered (speculatively read) by CPU.
*/
#if defined(CONFIG_ARCH_ARM_32) || defined(CONFIG_ARCH_ARM_64)
/* TODO: Provide atomic_load_acq_32() */
- /* cons_head = atomic_load_acq_32(&br->br_cons_head); */
- cons_head = &br->br_cons_head;
+ /* cons_head = atomic_load_acq_32(&r->cons_head); */
+ cons_head = &r->cons_head;
#else
- cons_head = br->br_cons_head;
+ cons_head = r->cons_head;
#endif
/* TODO: Provide atomic_load_acq_32() */
- /* prod_tail = atomic_load_acq_32(&br->br_prod_tail); */
- prod_tail = &br->br_prod_tail;
+ /* prod_tail = atomic_load_acq_32(&r->prod_tail); */
+ prod_tail = &r->prod_tail;
- cons_next = (cons_head + 1) & br->br_cons_mask;
+ cons_next = (cons_head + 1) & r->cons_mask;
#ifdef PREFETCH_DEFINED
- cons_next_next = (cons_head + 2) & br->br_cons_mask;
+ cons_next_next = (cons_head + 2) & r->cons_mask;
#endif
if (cons_head == prod_tail)
@@ -222,27 +222,27 @@ uk_ring_dequeue_sc(struct uk_ring *br)
#ifdef PREFETCH_DEFINED
if (cons_next != prod_tail) {
- prefetch(br->br_ring[cons_next]);
+ prefetch(r->ring[cons_next]);
if (cons_next_next != prod_tail)
- prefetch(br->br_ring[cons_next_next]);
+ prefetch(r->ring[cons_next_next]);
}
#endif
- br->br_cons_head = cons_next;
- buf = br->br_ring[cons_head];
+ r->cons_head = cons_next;
+ buf = r->ring[cons_head];
#ifdef DEBUG_BUFRING
- br->br_ring[cons_head] = NULL;
+ r->ring[cons_head] = NULL;
- if (!uk_mutex_is_locked(br->br_lock))
+ if (!uk_mutex_is_locked(r->lock))
uk_pr_crit("lock not held on single consumer dequeue\n");
- if (br->br_cons_tail != cons_head)
+ if (r->cons_tail != cons_head)
uk_pr_crit("inconsistent list cons_tail=%d cons_head=%d\n",
- br->br_cons_tail, cons_head);
+ r->cons_tail, cons_head);
#endif
- br->br_cons_tail = cons_next;
+ r->cons_tail = cons_next;
return buf;
}
@@ -252,25 +252,25 @@ uk_ring_dequeue_sc(struct uk_ring *br)
* e.g. a network driver's tx queue lock
*/
static __inline void
-uk_ring_advance_sc(struct uk_ring *br)
+uk_ring_advance_sc(struct uk_ring *r)
{
uint32_t cons_head, cons_next;
uint32_t prod_tail;
- cons_head = br->br_cons_head;
- prod_tail = br->br_prod_tail;
- cons_next = (cons_head + 1) & br->br_cons_mask;
+ cons_head = r->cons_head;
+ prod_tail = r->prod_tail;
+ cons_next = (cons_head + 1) & r->cons_mask;
if (cons_head == prod_tail)
return;
- br->br_cons_head = cons_next;
+ r->cons_head = cons_next;
#ifdef DEBUG_BUFRING
- br->br_ring[cons_head] = NULL;
+ r->ring[cons_head] = NULL;
#endif
- br->br_cons_tail = cons_next;
+ r->cons_tail = cons_next;
}
/*
@@ -290,11 +290,11 @@ uk_ring_advance_sc(struct uk_ring *br)
* the compare and an atomic.
*/
static __inline void
-uk_ring_putback_sc(struct uk_ring *br, void *new)
+uk_ring_putback_sc(struct uk_ring *r, void *new)
{
/* Buffer ring has none in putback */
- UK_ASSERT(br->br_cons_head != br->br_prod_tail);
- br->br_ring[br->br_cons_head] = new;
+ UK_ASSERT(r->cons_head != r->prod_tail);
+ r->ring[r->cons_head] = new;
}
/*
@@ -303,7 +303,7 @@ uk_ring_putback_sc(struct uk_ring *br, void *new)
* race-prone if not protected by a lock
*/
static __inline void *
-uk_ring_peek(struct uk_ring *br)
+uk_ring_peek(struct uk_ring *r)
{
#ifdef DEBUG_BUFRING
- if ((br->br_lock != NULL) && !uk_mutex_is_locked(br->br_lock))
+ if ((r->lock != NULL) && !uk_mutex_is_locked(r->lock))
@@ -316,14 +316,14 @@ uk_ring_peek(struct uk_ring *br)
* a lagging indicator so we worst case we might
* return NULL immediately after a buffer has been enqueued
*/
- if (br->br_cons_head == br->br_prod_tail)
+ if (r->cons_head == r->prod_tail)
return NULL;
- return br->br_ring[br->br_cons_head];
+ return r->ring[r->cons_head];
}
static __inline void *
-uk_ring_peek_clear_sc(struct uk_ring *br)
+uk_ring_peek_clear_sc(struct uk_ring *r)
{
#ifdef DEBUG_BUFRING
void *ret;
@@ -332,17 +332,17 @@ uk_ring_peek_clear_sc(struct uk_ring *br)
uk_pr_crit("lock not held on single consumer dequeue\n");
#endif
- if (br->br_cons_head == br->br_prod_tail)
+ if (r->cons_head == r->prod_tail)
return NULL;
#if defined(CONFIG_ARCH_ARM_32) || defined(CONFIG_ARCH_ARM_64)
/*
* The barrier is required there on ARM and ARM64 to ensure, that
- * br->br_ring[br->br_cons_head] will not be fetched before the above
+ * r->ring[r->cons_head] will not be fetched before the above
* condition is checked.
* Without the barrier, it is possible, that buffer will be fetched
- * before the enqueue will put mbuf into br, then, in the meantime, the
- * enqueue will update the array and the br_prod_tail, and the
+ * before the enqueue will put mbuf into r, then, in the meantime, the
+ * enqueue will update the array and the prod_tail, and the
* conditional check will be true, so we will return previously fetched
* (and invalid) buffer.
*/
@@ -355,36 +355,36 @@ uk_ring_peek_clear_sc(struct uk_ring *br)
* Single consumer, i.e. cons_head will not move while we are
* running, so atomic_swap_ptr() is not necessary here.
*/
- ret = br->br_ring[br->br_cons_head];
- br->br_ring[br->br_cons_head] = NULL;
+ ret = r->ring[r->cons_head];
+ r->ring[r->cons_head] = NULL;
return ret;
#else
- return br->br_ring[br->br_cons_head];
+ return r->ring[r->cons_head];
#endif
}
static __inline int
-uk_ring_full(struct uk_ring *br)
+uk_ring_full(struct uk_ring *r)
{
- return ((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail;
+ return ((r->prod_head + 1) & r->prod_mask) == r->cons_tail;
}
static __inline int
-uk_ring_empty(struct uk_ring *br)
+uk_ring_empty(struct uk_ring *r)
{
- return br->br_cons_head == br->br_prod_tail;
+ return r->cons_head == r->prod_tail;
}
static __inline int
-uk_ring_count(struct uk_ring *br)
+uk_ring_count(struct uk_ring *r)
{
- return (br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
- & br->br_prod_mask;
+ return (r->prod_size + r->prod_tail - r->cons_tail)
+ & r->prod_mask;
}
struct uk_ring *uk_ring_alloc(struct uk_alloc *a, int count, int flags,
struct uk_mutex *);
-void uk_ring_free(struct uk_alloc *a, struct uk_ring *br);
+void uk_ring_free(struct uk_alloc *a, struct uk_ring *r);
#endif
\ No newline at end of file
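
A note on the ARM workaround above: the commented-out
atomic_load_acq_32() would be an acquire load. A minimal sketch of such
a primitive with GCC builtins (hypothetical; Unikraft's eventual helper
may differ):

    #include <stdint.h>

    /* Hypothetical acquire load standing in for atomic_load_acq_32();
     * it prevents later loads (e.g. ring[cons_head]) from being
     * reordered before this one. */
    static inline uint32_t load_acq_32(const volatile uint32_t *p)
    {
            return __atomic_load_n(p, __ATOMIC_ACQUIRE);
    }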
diff --git a/lib/ukring/ring.c b/lib/ukring/ring.c
index 658130e..1431cb9 100644
--- a/lib/ukring/ring.c
+++ b/lib/ukring/ring.c
@@ -41,7 +41,7 @@
struct uk_ring *
uk_ring_alloc(struct uk_alloc *a, int count, int flags, struct uk_mutex *lock)
{
- struct uk_ring *br;
+ struct uk_ring *r;
if (a == NULL)
a = uk_alloc_get_default();
@@ -51,25 +51,25 @@ uk_ring_alloc(struct uk_alloc *a, int count, int flags, struct uk_mutex *lock)
- br = uk_malloc(a, sizeof(struct uk_ring) + count * sizeof(caddr_t));
+ r = uk_malloc(a, sizeof(struct uk_ring) + count * sizeof(caddr_t));
- if (br == NULL) {
+ if (r == NULL) {
uk_pr_err("Could not allocate ring: out of memory\n");
return NULL;
}
#ifdef DEBUG_BUFRING
- br->br_lock = lock;
+ r->lock = lock;
#endif
- br->br_prod_size = br->br_cons_size = count;
- br->br_prod_mask = br->br_cons_mask = count - 1;
- br->br_prod_head = br->br_cons_head = 0;
- br->br_prod_tail = br->br_cons_tail = 0;
+ r->prod_size = r->cons_size = count;
+ r->prod_mask = r->cons_mask = count - 1;
+ r->prod_head = r->cons_head = 0;
+ r->prod_tail = r->cons_tail = 0;
- return br;
+ return r;
}
void
uk_ring_free(struct uk_alloc *a, struct uk_ring *r)
{
- uk_free(a, br);
+ uk_free(a, r);
}
\ No newline at end of file
--
2.20.1
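
For completeness, a hypothetical caller of the renamed API, based only
on the signatures visible in this patch (the flags value 0 and the NULL
debug lock are assumptions; count must be a power of two for the mask
arithmetic):

    #include <uk/alloc.h>
    #include <uk/ring.h>

    static int ring_demo(void)
    {
            struct uk_alloc *a = uk_alloc_get_default();
            struct uk_ring *r;
            void *buf;
            int rc;

            r = uk_ring_alloc(a, 64, 0, NULL);  /* 64 slots, power of two */
            if (!r)
                    return -1;

            rc = uk_ring_enqueue(r, &rc);   /* returns ENOBUFS when full */
            buf = uk_ring_dequeue_sc(r);    /* single consumer; NULL if empty */

            uk_ring_free(a, r);
            return (rc == 0 && buf != NULL) ? 0 : -1;
    }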