DPDK 25.03.0-rc0
examples/l3fwd/l3fwd_event.c
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifdef RTE_LIB_EVENTDEV
#include <stdbool.h>
#include <getopt.h>
#include <rte_malloc.h>
#include "l3fwd.h"
#include "l3fwd_event.h"
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
char buf[RTE_ETHER_ADDR_FMT_SIZE];
rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
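/*
 * Lazily allocate the singleton eventdev resource structure on first call;
 * every later call returns the same pointer. Defaults to atomic scheduling
 * and one ethdev Rx queue per port.
 */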
struct l3fwd_event_resources *
l3fwd_get_eventdev_rsrc(void)
{
static struct l3fwd_event_resources *rsrc;
if (rsrc != NULL)
return rsrc;
rsrc = rte_zmalloc("l3fwd", sizeof(struct l3fwd_event_resources), 0);
if (rsrc != NULL) {
rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
rsrc->eth_rx_queues = 1;
return rsrc;
}
rte_exit(EXIT_FAILURE, "Unable to allocate memory for eventdev cfg\n");
return NULL;
}
static void
l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
uint16_t nb_ports = rte_eth_dev_count_avail();
unsigned int nb_lcores = rte_lcore_count();
struct rte_eth_conf local_port_conf;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf txconf;
struct rte_eth_rxconf rxconf;
unsigned int nb_mbuf;
uint16_t port_id;
uint8_t eth_qid;
int32_t ret;
	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		local_port_conf = *port_conf;
/* skip ports that are not enabled */
if ((evt_rsrc->port_mask & (1 << port_id)) == 0) {
printf("\nSkipping disabled port %d\n", port_id);
continue;
}
/* init port */
printf("Initializing port %d ... ", port_id);
fflush(stdout);
printf("Creating queues: nb_rxq=%d nb_txq=1...\n",
evt_rsrc->eth_rx_queues);
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0)
rte_panic("Error during getting device (port %u) info:"
"%s\n", port_id, strerror(-ret));
ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
if (ret != 0)
rte_exit(EXIT_FAILURE,
"Invalid max packet length: %u (port %u)\n",
max_pkt_len, port_id);
		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
				dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf->rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function "
			       "based on hardware support, "
			       "requested:%#"PRIx64" configured:%#"PRIx64"\n",
			       port_id,
			       port_conf->rx_adv_conf.rss_conf.rss_hf,
			       local_port_conf.rx_adv_conf.rss_conf.rss_hf);
}
ret = rte_eth_dev_configure(port_id, evt_rsrc->eth_rx_queues,
1, &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot configure device: err=%d, port=%d\n",
ret, port_id);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
&nb_txd);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot adjust number of descriptors: err=%d, "
"port=%d\n", ret, port_id);
rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
print_ethaddr(" Address:", &ports_eth_addr[port_id]);
printf(", ");
print_ethaddr("Destination:",
(const struct rte_ether_addr *)&dest_eth_addr[port_id]);
printf(", ");
/* prepare source MAC for each port. */
rte_ether_addr_copy(&ports_eth_addr[port_id],
(struct rte_ether_addr *)(val_eth + port_id) + 1);
/* init memory */
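		/*
		 * Size the pool to cover all Rx/Tx descriptors plus one
		 * burst of packets in flight per lcore, with headroom for
		 * the per-lcore mempool cache; never below 8192 mbufs.
		 */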
if (!evt_rsrc->per_port_pool) {
/* port_id = 0; this is *not* signifying the first port,
* rather, it signifies that port_id is ignored.
*/
nb_mbuf = RTE_MAX(nb_ports * nb_rxd +
nb_ports * nb_txd +
nb_ports * nb_lcores *
MAX_PKT_BURST +
nb_lcores * MEMPOOL_CACHE_SIZE,
8192u);
ret = init_mem(0, nb_mbuf);
} else {
			nb_mbuf = RTE_MAX(nb_rxd + nb_txd +
nb_lcores * MAX_PKT_BURST +
nb_lcores * MEMPOOL_CACHE_SIZE,
8192u);
ret = init_mem(port_id, nb_mbuf);
}
/* init Rx queues per port */
rxconf = dev_info.default_rxconf;
rxconf.offloads = local_port_conf.rxmode.offloads;
for (eth_qid = 0; eth_qid < evt_rsrc->eth_rx_queues;
eth_qid++) {
if (!evt_rsrc->per_port_pool)
ret = rte_eth_rx_queue_setup(port_id, eth_qid,
nb_rxd, 0, &rxconf,
evt_rsrc->pkt_pool[0][0]);
else
ret = rte_eth_rx_queue_setup(port_id, eth_qid,
nb_rxd, 0, &rxconf,
evt_rsrc->pkt_pool[port_id][0]);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
"port=%d, eth_qid: %d\n",
ret, port_id, eth_qid);
}
/* init one Tx queue per port */
txconf = dev_info.default_txconf;
txconf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, 0, &txconf);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, port_id);
}
}
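/*
 * Probe the Tx adapter capability of each ethdev port: if any port lacks
 * an internal event port, fall back to the generic ops that bridge Tx
 * through a dedicated event queue (tx_mode_q); otherwise use the
 * internal-port ops.
 */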
static void
l3fwd_event_capability_setup(void)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
uint32_t caps = 0;
uint16_t i;
	int ret;

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
		if (ret)
			rte_exit(EXIT_FAILURE,
				 "Invalid capability for Tx adptr port %d\n",
				 i);

		evt_rsrc->tx_mode_q |= !(caps &
				   RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
	}
if (evt_rsrc->tx_mode_q)
l3fwd_event_set_generic_ops(&evt_rsrc->ops);
else
l3fwd_event_set_internal_port_ops(&evt_rsrc->ops);
}
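/*
 * Hand out the next unused event port under the spinlock so that each
 * worker lcore can claim its own port. Illustrative caller sketch (not
 * part of this file):
 *
 *	const int port_id = l3fwd_get_free_event_port(evt_rsrc);
 *	if (port_id < 0)
 *		return;
 *	// dequeue events from port_id ...
 */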
int
l3fwd_get_free_event_port(struct l3fwd_event_resources *evt_rsrc)
{
static int index;
int port_id;
	rte_spinlock_lock(&evt_rsrc->evp.lock);
	if (index >= evt_rsrc->evp.nb_ports) {
		printf("No free event port is available\n");
		rte_spinlock_unlock(&evt_rsrc->evp.lock);
		return -1;
	}
port_id = evt_rsrc->evp.event_p_id[index];
index++;
rte_spinlock_unlock(&evt_rsrc->evp.lock);
return port_id;
}
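/*
 * Top-level event-mode setup. The event loop tables below are indexed as
 * [vector_enabled][tx_mode_q][has_burst], so the matching worker loop can
 * be selected once the device capabilities are known.
 */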
void
l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
const event_loop_cb lpm_event_loop[2][2][2] = {
[0][0][0] = lpm_event_main_loop_tx_d,
[0][0][1] = lpm_event_main_loop_tx_d_burst,
[0][1][0] = lpm_event_main_loop_tx_q,
[0][1][1] = lpm_event_main_loop_tx_q_burst,
[1][0][0] = lpm_event_main_loop_tx_d_vector,
[1][0][1] = lpm_event_main_loop_tx_d_burst_vector,
[1][1][0] = lpm_event_main_loop_tx_q_vector,
[1][1][1] = lpm_event_main_loop_tx_q_burst_vector,
};
const event_loop_cb em_event_loop[2][2][2] = {
[0][0][0] = em_event_main_loop_tx_d,
[0][0][1] = em_event_main_loop_tx_d_burst,
[0][1][0] = em_event_main_loop_tx_q,
[0][1][1] = em_event_main_loop_tx_q_burst,
[1][0][0] = em_event_main_loop_tx_d_vector,
[1][0][1] = em_event_main_loop_tx_d_burst_vector,
[1][1][0] = em_event_main_loop_tx_q_vector,
[1][1][1] = em_event_main_loop_tx_q_burst_vector,
};
const event_loop_cb fib_event_loop[2][2][2] = {
[0][0][0] = fib_event_main_loop_tx_d,
[0][0][1] = fib_event_main_loop_tx_d_burst,
[0][1][0] = fib_event_main_loop_tx_q,
[0][1][1] = fib_event_main_loop_tx_q_burst,
[1][0][0] = fib_event_main_loop_tx_d_vector,
[1][0][1] = fib_event_main_loop_tx_d_burst_vector,
[1][1][0] = fib_event_main_loop_tx_q_vector,
[1][1][1] = fib_event_main_loop_tx_q_burst_vector,
};
uint32_t event_queue_cfg;
int ret;
if (!evt_rsrc->enabled)
return;
	if (!rte_event_dev_count())
		rte_exit(EXIT_FAILURE, "No Eventdev found");
/* Setup eventdev capability callbacks */
l3fwd_event_capability_setup();
/* Ethernet device configuration */
l3fwd_eth_dev_port_setup(port_conf);
/* Event device configuration */
event_queue_cfg = evt_rsrc->ops.event_device_setup();
/* Event queue configuration */
evt_rsrc->ops.event_queue_setup(event_queue_cfg);
/* Event port configuration */
evt_rsrc->ops.event_port_setup();
/* Rx/Tx adapters configuration */
evt_rsrc->ops.adapter_setup();
/* Start event device */
ret = rte_event_dev_start(evt_rsrc->event_d_id);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error in starting eventdev");
evt_rsrc->ops.lpm_event_loop =
lpm_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
[evt_rsrc->has_burst];
evt_rsrc->ops.em_event_loop =
em_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
[evt_rsrc->has_burst];
evt_rsrc->ops.fib_event_loop =
fib_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
[evt_rsrc->has_burst];
}
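/*
 * Free the mbufs carried by each event vector, then return the vector
 * object itself to the mempool it was allocated from.
 */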
static void
l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
{
uint16_t i;
	for (i = 0; i < num; i++) {
		rte_pktmbuf_free_bulk(
			&events[i].vec->mbufs[events[i].vec->elem_offset],
			events[i].vec->nb_elem);
		rte_mempool_put(rte_mempool_from_obj(events[i].vec),
				events[i].vec);
	}
}
static void
l3fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
void *args __rte_unused)
{
	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
		l3fwd_event_vector_array_free(&ev, 1);
	else
		rte_pktmbuf_free(ev.mbuf);
}
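/*
 * Worker teardown: free the events that were dequeued but never enqueued
 * (indices nb_enq..nb_deq-1), mark all dequeued events RTE_EVENT_OP_RELEASE
 * so the scheduler drops their flow contexts, then quiesce the event port.
 */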
void
l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
struct rte_event events[], uint16_t nb_enq,
uint16_t nb_deq, uint8_t is_vector)
{
int i;
if (nb_deq) {
if (is_vector)
l3fwd_event_vector_array_free(events + nb_enq,
nb_deq - nb_enq);
else
for (i = nb_enq; i < nb_deq; i++)
rte_pktmbuf_free(events[i].mbuf);
for (i = 0; i < nb_deq; i++)
events[i].op = RTE_EVENT_OP_RELEASE;
rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
}
rte_event_port_quiesce(event_d_id, event_p_id, l3fwd_event_port_flush,
NULL);
}
#endif /* RTE_LIB_EVENTDEV */