[Minios-devel] [UNIKRAFT/LWIP PATCH 3/3] Introduce netbuf allocation helper
On Xen, packet buffers need to be page-aligned, so we add support for
allocating buffer data with a given alignment. Because packet buffer sizes
and alignments may differ from one device to another, we introduce a netbuf
allocation helper structure which encapsulates the information required for
allocating packet buffers for each network device. We also set ETH_PAD_SIZE
to 0, considering that padding and alignment are handled according to each
driver's capabilities.

Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
---
 include/arch/cc.h |  2 +-
 netbuf.c          |  8 +++--
 netbuf.h          |  2 +-
 uknetdev.c        | 87 +++++++++++++++++++++++++++++++++++++++----------------
 4 files changed, 69 insertions(+), 30 deletions(-)

diff --git a/include/arch/cc.h b/include/arch/cc.h
index a1d0c34..a5b91b5 100644
--- a/include/arch/cc.h
+++ b/include/arch/cc.h
@@ -51,7 +51,7 @@
 /* 32 bit checksum calculation */
 #define LWIP_CHKSUM_ALGORITHM 3
 
-#define ETH_PAD_SIZE 2
+#define ETH_PAD_SIZE 0
 
 /* rand */
 #define LWIP_RAND() uk_swrand_randr()
diff --git a/netbuf.c b/netbuf.c
index 4878f74..c8c02bf 100644
--- a/netbuf.c
+++ b/netbuf.c
@@ -50,14 +50,16 @@ static void _netbuf_free(struct pbuf *p)
 }
 
 struct uk_netbuf *lwip_alloc_netbuf(struct uk_alloc *a, size_t alloc_size,
-				    size_t headroom)
+				    size_t alloc_align, size_t headroom)
 {
 	void *allocation;
 	struct uk_netbuf *b;
 	struct _netbuf_pbuf *np;
+	int rc;
 
-	allocation = uk_malloc(a, alloc_size);
-	if (unlikely(!allocation))
+	rc = uk_posix_memalign(a, (void **) &allocation, alloc_align,
+			       alloc_size);
+	if (unlikely(rc))
 		goto err_out;
 
 	b = uk_netbuf_prepare_buf(allocation, alloc_size,
diff --git a/netbuf.h b/netbuf.h
index 1492065..d21e084 100644
--- a/netbuf.h
+++ b/netbuf.h
@@ -61,7 +61,7 @@ struct _netbuf_pbuf {
  * before handing over the embedded pbuf to the network stack.
  */
 struct uk_netbuf *lwip_alloc_netbuf(struct uk_alloc *a, size_t alloc_size,
-				    size_t headroom);
+				    size_t alloc_align, size_t headroom);
 
 /**
  * Returns the reference of the embedded pbuf of a netbuf
diff --git a/uknetdev.c b/uknetdev.c
index 8078ee1..46aca39 100644
--- a/uknetdev.c
+++ b/uknetdev.c
@@ -57,7 +57,6 @@
 #include <uk/essentials.h>
 
 #define UKNETDEV_BPS 1000000000u
-#define UKNETDEV_BUFLEN 2048
 
 #define UKNETDEV_NETIF_NAME0 'e'
 #define UKNETDEV_NETIF_NAME1 'n'
@@ -74,25 +73,67 @@
 static uint16_t rx_headroom = ETH_PAD_SIZE;
 static uint16_t tx_headroom = ETH_PAD_SIZE;
 
+/* Helper structure for allocating packet buffers */
+struct netbuf_alloc_helper {
+	struct uk_alloc *allocator; /* Packet allocator */
+	uint16_t tx_headroom;       /* Tx buffer headroom */
+	uint16_t rx_headroom;       /* Rx buffer headroom */
+	uint16_t tx_size;           /* Tx buffer size */
+	uint16_t rx_size;           /* Rx buffer size */
+	uint16_t data_align;        /* Alignment for packet data */
+};
+
+static void netbuf_alloc_helper_init(struct netbuf_alloc_helper *nah,
+	struct uk_alloc *a, struct uk_netdev_info *dev_info)
+{
+	uint16_t buf_data_size;
+
+	nah->allocator = a;
+	nah->tx_headroom = MAX(tx_headroom, dev_info->nb_encap_tx);
+	nah->rx_headroom = MAX(rx_headroom, dev_info->nb_encap_rx);
+
+	/* Packet data size may be aligned */
+	buf_data_size = ALIGN_UP(dev_info->max_mtu, dev_info->align);
+
+	nah->tx_size = sizeof(struct uk_netbuf) + nah->tx_headroom;
+	/* If buffer data is aligned then buffer header is aligned */
+	nah->tx_size = ALIGN_UP(nah->tx_size, dev_info->align);
+	/* If buffer data is aligned then headroom expands */
+	nah->tx_headroom = nah->tx_size - sizeof(struct uk_netbuf);
+	nah->tx_size += buf_data_size;
+
+	nah->rx_size = sizeof(struct uk_netbuf) + sizeof(struct _netbuf_pbuf) +
+		       nah->rx_headroom;
+	nah->rx_size = ALIGN_UP(nah->rx_size, dev_info->align);
+	nah->rx_headroom = nah->rx_size - sizeof(struct uk_netbuf) -
+			   sizeof(struct _netbuf_pbuf);
+	nah->rx_size += buf_data_size;
+
+	nah->data_align = dev_info->align;
+}
+
 struct uk_netif_state {
 	struct uk_netdev *netdev;
+	struct netbuf_alloc_helper alloc_helper;
 };
 
 #define netif_to_uknetdev(nf) \
 	(((struct uk_netif_state *) (nf)->state)->netdev)
+#define netif_to_alloc_helper(nf) \
+	(&((struct uk_netif_state *) (nf)->state)->alloc_helper)
 
 static uint16_t netif_alloc_rxpkts(void *argp, struct uk_netbuf *nb[],
 				   uint16_t count)
 {
-	struct uk_alloc *a;
+	struct netbuf_alloc_helper *nah;
 	uint16_t i;
 
 	UK_ASSERT(argp);
-
-	a = (struct uk_alloc *) argp;
+	nah = (struct netbuf_alloc_helper *) argp;
 
 	for (i = 0; i < count; ++i) {
-		nb[i] = lwip_alloc_netbuf(a, UKNETDEV_BUFLEN, rx_headroom);
+		nb[i] = lwip_alloc_netbuf(nah->allocator, nah->rx_size,
+					  nah->data_align, nah->rx_headroom);
 		if (!nb[i]) {
 			/* we run out of memory */
 			break;
@@ -104,7 +145,7 @@ static uint16_t netif_alloc_rxpkts(void *argp, struct uk_netbuf *nb[],
 
 static err_t uknetdev_output(struct netif *nf, struct pbuf *p)
 {
-	struct uk_alloc *a;
+	struct netbuf_alloc_helper *nah;
 	struct uk_netdev *dev;
 	struct pbuf *q;
 	struct uk_netbuf *nb;
@@ -115,18 +156,17 @@ static err_t uknetdev_output(struct netif *nf, struct pbuf *p)
 	UK_ASSERT(nf);
 	dev = netif_to_uknetdev(nf);
 	UK_ASSERT(dev);
+	nah = netif_to_alloc_helper(nf);
+	UK_ASSERT(nah);
 
-	a = uk_alloc_get_default();
-	if (!a)
-		return ERR_MEM;
-
-	allocation = uk_malloc(a, UKNETDEV_BUFLEN);
-	if (!allocation)
+	ret = uk_posix_memalign(nah->allocator, (void **) &allocation,
+				nah->data_align, nah->tx_size);
+	if (unlikely(ret))
 		return ERR_MEM;
-	nb = uk_netbuf_prepare_buf(allocation, UKNETDEV_BUFLEN,
-				   tx_headroom, 0, NULL);
+	nb = uk_netbuf_prepare_buf(allocation, nah->tx_size,
+				   nah->tx_headroom, 0, NULL);
 	UK_ASSERT(nb);
-	nb->_a = a; /* register allocator for free operation */
+	nb->_a = nah->allocator; /* register allocator for free operation */
 
 	if (unlikely(p->tot_len > uk_netbuf_tailroom(nb))) {
 		LWIP_DEBUGF(NETIF_DEBUG,
@@ -356,12 +396,15 @@ err_t uknetdev_init(struct netif *nf)
 	struct uk_netdev_txqueue_conf txq_conf;
 	struct uk_netdev_info info;
 	const struct uk_hwaddr *hwaddr;
+	struct netbuf_alloc_helper *nah;
 	unsigned int i;
 	int ret;
 
 	UK_ASSERT(nf);
 	dev = netif_to_uknetdev(nf);
 	UK_ASSERT(dev);
+	nah = netif_to_alloc_helper(nf);
+	UK_ASSERT(nah);
 
 	LWIP_ASSERT("uknetdev needs an input callback (netif_input or tcpip_input)",
 		    nf->input != NULL);
@@ -394,20 +437,14 @@ err_t uknetdev_init(struct netif *nf)
 	uk_netdev_info_get(dev, &info);
 	if (!info.max_rx_queues || !info.max_tx_queues)
 		return ERR_IF;
-
-	/*
-	 * Update our global (rx|tx)_headroom setting that we use for
-	 * buffer allocations
-	 */
-	rx_headroom = (rx_headroom < info.nb_encap_rx)
-		      ? info.nb_encap_rx : rx_headroom;
-	tx_headroom = (tx_headroom < info.nb_encap_tx)
-		      ? info.nb_encap_tx : tx_headroom;
 	LWIP_DEBUGF(NETIF_DEBUG,
 		    ("%s: %c%c%u: Need headroom rx:%"PRIu16", tx:%"PRIu16"\n",
 		     __func__, nf->name[0], nf->name[1], nf->num,
 		     info.nb_encap_rx, info.nb_encap_tx));
 
+	/* initialize buffer allocation helper */
+	netbuf_alloc_helper_init(nah, a, &info);
+
 	/*
	 * Device configuration,
	 * we want to use just one queue for each direction
@@ -429,7 +466,7 @@ err_t uknetdev_init(struct netif *nf)
	 */
 	rxq_conf.a = a;
 	rxq_conf.alloc_rxpkts = netif_alloc_rxpkts;
-	rxq_conf.alloc_rxpkts_argp = a;
+	rxq_conf.alloc_rxpkts_argp = nah;
 #ifdef CONFIG_LWIP_NOTHREADS
 	/*
	 * In mainloop mode, we will not use interrupts.
-- 
2.11.0
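
As a note for reviewers, the rx buffer layout computed by
netbuf_alloc_helper_init() can be checked with the small standalone sketch
below. The metadata size (64 bytes), the MTU and the page-sized alignment are
assumed values for illustration only, not the actual Unikraft definitions; the
point is that rounding metadata plus headroom up to the device alignment keeps
the packet data aligned, with the rounding slack becoming extra headroom.

/*
 * Standalone sketch of the rx buffer layout arithmetic in
 * netbuf_alloc_helper_init(); all numbers below are assumed values.
 */
#include <stdio.h>

#define ALIGN_UP(v, a) (((v) + (a) - 1) & ~((a) - 1))
#define MAX(a, b)      ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int align       = 4096; /* e.g. page alignment needed on Xen */
	unsigned int max_mtu     = 1500; /* device MTU */
	unsigned int nb_encap_rx = 18;   /* driver-required rx headroom */
	unsigned int meta        = 64;   /* assumed sizeof(struct uk_netbuf) +
					  * sizeof(struct _netbuf_pbuf) */
	unsigned int data_size, rx_headroom, rx_size;

	/* packet data area, rounded up to the device alignment */
	data_size = ALIGN_UP(max_mtu, align);

	/* metadata + headroom rounded up so the data area stays aligned */
	rx_headroom = MAX(0 /* ETH_PAD_SIZE */, nb_encap_rx);
	rx_size = ALIGN_UP(meta + rx_headroom, align);

	/* the rounding slack becomes extra headroom ("headroom expands") */
	rx_headroom = rx_size - meta;
	rx_size += data_size;

	/* prints: rx_size=8192 rx_headroom=4032 data_offset=4096 */
	printf("rx_size=%u rx_headroom=%u data_offset=%u\n",
	       rx_size, rx_headroom, meta + rx_headroom);
	return 0;
}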
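
The switch from uk_malloc() to uk_posix_memalign() also changes the error
handling from a NULL check to a return-code check. This mirrors the standard
posix_memalign() contract, sketched below with the libc call standing in for
the Unikraft allocator API.

/*
 * Illustration of the posix_memalign()-style contract: failure is
 * reported through the return value, not through a NULL pointer.
 * Standard posix_memalign() stands in for uk_posix_memalign() here.
 */
#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>
#include <stdio.h>

int main(void)
{
	void *allocation;
	int rc;

	/* alignment must be a power of two and a multiple of sizeof(void *) */
	rc = posix_memalign(&allocation, 4096, 8192);
	if (rc) {
		/* corresponds to the "goto err_out" / "return ERR_MEM" paths */
		fprintf(stderr, "allocation failed: %d\n", rc);
		return 1;
	}

	printf("got a page-aligned buffer at %p\n", allocation);
	free(allocation);
	return 0;
}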