[UNIKRAFT/LWIP PATCH v4 3/5] uknetdev: Per device RX/TX headroom
Store the required RX and TX headroom per device on `lwip_data` instead of relying on global variables that track the maximum across all uknetdev devices.
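
For illustration, the per-device values boil down to the following computation at netif initialization (a stand-alone sketch; `struct encap_info` is a hypothetical stand-in for the relevant `struct uk_netdev_info` fields, and the `ETH_PAD_SIZE` fallback is only a placeholder for the value from lwipopts.h):

#include <stdint.h>

#ifndef ETH_PAD_SIZE
#define ETH_PAD_SIZE 2 /* placeholder; the real value comes from lwipopts.h */
#endif

/* Hypothetical stand-in for the fields read via uk_netdev_info_get() */
struct encap_info {
	uint16_t nb_encap_rx; /* headroom the driver needs on receive */
	uint16_t nb_encap_tx; /* headroom the driver needs on transmit */
};

/* Per-device headroom as stored in struct lwip_netdev_data below */
static void compute_headroom(const struct encap_info *info,
			     uint16_t *rx_headroom, uint16_t *tx_headroom)
{
#if ETH_PAD_SIZE
	*rx_headroom = info->nb_encap_rx + ETH_PAD_SIZE;
	*tx_headroom = info->nb_encap_tx + ETH_PAD_SIZE;
#else
	*rx_headroom = info->nb_encap_rx;
	*tx_headroom = info->nb_encap_tx;
#endif
}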
Signed-off-by: Simon Kuenzer <simon.kuenzer@xxxxxxxxx>
---
uknetdev.c | 39 ++++++++++++++-------------------------
1 file changed, 14 insertions(+), 25 deletions(-)
diff --git a/uknetdev.c b/uknetdev.c
index ccb7368..ba7d41c 100644
--- a/uknetdev.c
+++ b/uknetdev.c
@@ -65,6 +65,8 @@
struct lwip_netdev_data {
uint32_t features;
struct uk_alloc *pkt_a;
+ uint16_t rx_headroom;
+ uint16_t tx_headroom;
#ifdef CONFIG_HAVE_SCHED
struct uk_thread *poll_thread; /* Thread per device */
char *_name; /* Thread name */
@@ -81,18 +83,6 @@ struct lwip_netdev_data {
*/
UK_CTASSERT((sizeof(struct lwip_netdev_data)) <=
CONFIG_UK_NETDEV_SCRATCH_SIZE);
-/*
- * Global headroom settings for buffer allocations used on receive
- * and transmit. We are taking the maximum of all uknetdev devices as
- * simplification.
- * TODO: A per-device setting might be more efficient but requires more data
- * fields for `netif->state`. For now we point directly to the according
- * `struct uk_netdev` in order to avoid another allocation for these
- * per-device fields.
- */
-static uint16_t rx_headroom = ETH_PAD_SIZE;
-static uint16_t tx_headroom = ETH_PAD_SIZE;
-
#define netif_to_uknetdev(nf) \
((struct uk_netdev *) (nf)->state)
@@ -109,7 +99,7 @@ static uint16_t netif_alloc_rxpkts(void *argp, struct uk_netbuf *nb[],
for (i = 0; i < count; ++i) {
nb[i] = lwip_alloc_netbuf(lwip_data->pkt_a,
UKNETDEV_BUFLEN,
- rx_headroom);
+ lwip_data->rx_headroom);
if (!nb[i]) {
/* we run out of memory */
break;
@@ -139,7 +129,7 @@ static err_t uknetdev_output(struct netif *nf, struct pbuf *p)
if (!allocation)
return ERR_MEM;
nb = uk_netbuf_prepare_buf(allocation, UKNETDEV_BUFLEN,
- tx_headroom, 0, NULL);
+ lwip_data->tx_headroom, 0, NULL);
UK_ASSERT(nb);
nb->_a = a; /* register allocator for free operation */
nb->_b = allocation;
@@ -495,21 +485,20 @@ err_t uknetdev_init(struct netif *nf)
uk_netdev_info_get(dev, &info);
if (!info.max_rx_queues || !info.max_tx_queues)
return ERR_IF;
- lwip_data->features = info.features;
- lwip_data->pkt_a = a;
+ lwip_data->features = info.features;
+#if ETH_PAD_SIZE
+ lwip_data->rx_headroom = info.nb_encap_rx + ETH_PAD_SIZE;
+ lwip_data->tx_headroom = info.nb_encap_tx + ETH_PAD_SIZE;
+#else
+ lwip_data->rx_headroom = info.nb_encap_rx;
+ lwip_data->tx_headroom = info.nb_encap_tx;
+#endif
+ lwip_data->pkt_a = a;
- /*
- * Update our global (rx|tx)_headroom setting that we use for
- * buffer allocations
- */
- rx_headroom = (rx_headroom < info.nb_encap_rx)
- ? info.nb_encap_rx : rx_headroom;
- tx_headroom = (tx_headroom < info.nb_encap_tx)
- ? info.nb_encap_tx : tx_headroom;
LWIP_DEBUGF(NETIF_DEBUG,
("%s: %c%c%u: Need headroom rx:%"PRIu16", tx:%"PRIu16"\n",
__func__, nf->name[0], nf->name[1], nf->num,
- info.nb_encap_rx, info.nb_encap_tx));
+ lwip_data->rx_headroom, lwip_data->tx_headroom));
/*
* Device configuration,
--
2.20.1
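
For context, a simplified stand-alone sketch of what reserving headroom buys on the receive path: the usable packet area starts `headroom` bytes into the buffer so the driver can prepend its encapsulation in place (`alloc_pkt_with_headroom` is hypothetical and only mimics what `lwip_alloc_netbuf()` does with its headroom argument; the buffer length is a placeholder for UKNETDEV_BUFLEN):

#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>

#define BUFLEN 2048 /* placeholder for UKNETDEV_BUFLEN */

/* Hypothetical helper: allocate a packet buffer and point the packet
 * data `headroom` bytes into it, leaving room for driver encapsulation. */
static void *alloc_pkt_with_headroom(size_t buflen, uint16_t headroom,
				     uint8_t **data)
{
	uint8_t *buf = malloc(buflen);

	if (!buf || headroom >= buflen) {
		free(buf);
		return NULL;
	}
	*data = buf + headroom;
	return buf;
}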